Render points as depth map
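"""Render a point cloud as depth maps: each point is splatted as a small
isotropic Gaussian with taichi_splatting and viewed from every camera in a scan.
"""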
import argparse
import math
from pathlib import Path

import cv2
import numpy as np
import open3d as o3d
import taichi as ti
import torch

from camera_geometry import Camera, FrameSet
from camera_geometry.scan.views import undistort_cameras
from taichi_splatting import Gaussians3D, perspective, render_gaussians


def to_camera_params(camera: Camera):
    """Convert a camera_geometry Camera to taichi_splatting CameraParams."""
    return perspective.CameraParams(
        T_image_camera=torch.tensor(camera.intrinsic, dtype=torch.float32),
        T_camera_world=torch.tensor(camera.camera_t_parent, dtype=torch.float32),
        near_plane=0.1,
        far_plane=100.0,
        image_size=camera.image_size,
    )


def main():
    parser = argparse.ArgumentParser(description="Render depth maps from a point cloud")
    parser.add_argument("--scan", type=Path, required=True, help="Path to the scan file")
    parser.add_argument("--cloud", type=Path, required=True, help="Path to the point cloud file")
    parser.add_argument("--point_size", type=float, default=0.001, help="Point size in meters")
    parser.add_argument("--device", type=str, default="cuda:0", help="Device to use")
    parser.add_argument("--image_size", type=int, default=1024, help="Rendered image width in pixels")
    args = parser.parse_args()

    ti.init(arch=ti.cuda)

    scan = FrameSet.load_file(args.scan)
    # Build an undistorted camera for every camera in the rig at every rig pose.
    undistortions = undistort_cameras(scan.cameras)
    cameras = [undist.undistorted.transform(pose)
               for _, undist in undistortions.items()
               for pose in scan.rig_poses]
    points = o3d.io.read_point_cloud(str(args.cloud))
    positions = np.asarray(points.points)

    if points.has_colors():
        colors = np.asarray(points.colors)
    else:
        colors = np.ones_like(positions)  # default to white if the cloud has no colors
    # Represent each point as a small, near-opaque isotropic Gaussian of fixed size.
    log_size = math.log(args.point_size)
    positions = torch.from_numpy(positions).to(dtype=torch.float32)

    gaussians = Gaussians3D(
        position=positions,
        feature=torch.from_numpy(colors).to(dtype=torch.float32),
        log_scaling=torch.full_like(positions, log_size, dtype=torch.float32),
        rotation=torch.tensor([0, 0, 0, 1], dtype=torch.float32).repeat(positions.shape[0], 1),
        alpha_logit=torch.full((positions.shape[0], 1), 10.0, dtype=torch.float32),
        batch_size=(positions.shape[0],),
    )

    gaussians = gaussians.to(device=args.device)
    packed = gaussians.packed()
    for camera in cameras:
        # Rescale the camera so the rendered image is args.image_size pixels wide.
        camera: Camera = camera.scale_image(scale_x=args.image_size / camera.image_size[0])
        camera_params = to_camera_params(camera).to(device=args.device)

        rendering = render_gaussians(packed, features=gaussians.feature,
                                     camera_params=camera_params, render_depth=True)

        # Scale depth (meters) to 8-bit and colormap it for display.
        depth = (rendering.depth * 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        colormapped = cv2.applyColorMap(depth, cv2.COLORMAP_JET)

        cv2.imshow("Depth", colormapped)
        cv2.waitKey(0)  # press any key to advance to the next camera


if __name__ == "__main__":
    main()
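
# Example invocation (file names here are hypothetical):
#   python render_depth.py --scan scene/scan.json --cloud scene/cloud.ply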