Simple Python script for projecting a camera pixel into the ground plane. Requires a full camera calibration (intrinsic and extrinsic parameters).
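In brief, the geometry the script implements is a standard pinhole back-projection followed by a ray/plane intersection: a pixel (u, v) is back-projected through the intrinsic matrix K to a ray in the camera frame, and that ray is intersected with the z = 0 floor plane of the robot frame. With camera position c and camera-to-robot rotation R (both from the extrinsic calibration), and assuming an undistorted pinhole model:

\[
\mathbf{r} = K^{-1}\begin{bmatrix} u \\ v \\ 1 \end{bmatrix},
\qquad
\mathbf{p}(s) = \mathbf{c} + s\,R\,\mathbf{r},
\qquad
p_z(s) = 0 \;\Rightarrow\; s = \frac{-c_z}{(R\,\mathbf{r})_z},
\]

and the returned floor point is p(s), expressed in the robot frame.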
#!/usr/bin/env python
"""project_pixel_to_floor.py: project an image pixel into the ground plane."""
__author__ = "Eric Cox"
__maintainer__ = "Eric Cox"
__email__ = "eric_cox@jabil.com"

import numpy as np
from numpy.linalg import inv
from tf import transformations as tfx


def project_camera_point_to_ground_plane(pixel_coordinate,
                                         camera_position,
                                         camera_orientation,
                                         camera_intrinsics):
    """Project an image pixel onto the ground (z = 0) plane.

    Inputs:
        pixel_coordinate: list [u, v] containing the pixel coordinate
        camera_position: list [x, y, z] describing the camera origin in the robot frame
        camera_orientation: list [qx, qy, qz, qw] describing the camera orientation
        camera_intrinsics: 3x3 camera intrinsic matrix K
    Outputs:
        floor_point: list [x, y, z] containing the pixel projection in the robot frame
    """
    # Ground plane normal in the camera frame, scaled by the camera height above the floor
    rotation = np.matrix(tfx.quaternion_matrix(camera_orientation))[0:3, 0:3]
    plane_normal_in_robot_frame = np.matrix([[0.], [0.], [camera_position[2]]])
    plane_normal = -rotation.T * plane_normal_in_robot_frame
    # Back-project the pixel to a ray direction in the camera frame
    # TODO: If distortion is known, we must also take it into account
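    # (One option, not used here: if the distortion coefficients were available,
    #  cv2.undistortPoints(pixel, K, dist_coeffs) would return the normalized
    #  image coordinates [x, y] of the undistorted ray directly.)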
    camera_vector = inv(camera_intrinsics) * np.matrix(
        pixel_coordinate + [1.]).T
    # Find the scaling factor that projects the camera ray onto the floor plane.
    # The denominator below is zero only when the ray is parallel to the ground
    # plane, in which case there is no intersection to project to, so we raise
    # instead of dividing by zero.
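    # (For reference: plane_normal = -h * R^T * e_z with h = camera height, so
    #  dot(camera_vector, plane_normal) = -h * (R * camera_vector)_z, and hence
    #  sf = h**2 / dot(...) = -h / (R * camera_vector)_z, i.e. the ray parameter
    #  that drives the z-coordinate of the projected point to zero.)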
    denominator = np.dot(camera_vector.T.tolist()[0],
                         plane_normal.T.tolist()[0])
    if np.abs(denominator) < 1e-9:
        raise ValueError("Camera ray is parallel to the ground plane")
    sf = camera_position[2] ** 2 / denominator
    camera_vector = sf * camera_vector
    # Convert camera_vector to the robot frame
    floor_point = np.matrix(camera_position).T + rotation * camera_vector
    if np.abs(floor_point[2]) > 1e-6:
        raise ValueError("Projected point is not in the floor plane")
    else:
        # Clear out any residual numeric error
        floor_point[2] = 0.
    return floor_point.T.tolist()[0]


if __name__ == "__main__":
    # Pose of the camera with respect to base_footprint (could also be odom), e.g. from
    # `rosrun tf tf_echo /base_footprint /depthcam_proximity_front_depth_optical_frame`
    # (see the sketch after this script for looking these values up at runtime)
    quaternion = [0.703, 0.699, 0.095, 0.092]
    translation = [0.109, -0.010, 1.813]
    # Camera intrinsics are determined experimentally for each camera
    # (published on the camera's camera_info topic)
    K = np.matrix([[142.5851287841797, 0.0, 79.25],
                   [0.0, 142.5851287841797, 56.0],
                   [0.0, 0.0, 1.0]])
    # Pixel coordinates of the image corners
    pts = [[0, 0], [0, 120], [160, 0], [160, 120]]
    floor_pts = []
    for pt in pts:
        floor_pts.append(project_camera_point_to_ground_plane(pt,
                                                              translation,
                                                              quaternion,
                                                              K))
    print(floor_pts)
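For convenience, below is a minimal sketch of how the hard-coded pose and intrinsics above could be looked up from a running ROS system instead of being pasted in by hand. It assumes rospy and tf are available and that the camera publishes a CameraInfo message; the camera_info topic name used here is only a guess and must be replaced with your camera driver's actual topic.

#!/usr/bin/env python
"""Sketch: look up the camera pose and intrinsics at runtime (hypothetical topic name)."""
import numpy as np
import rospy
import tf
from sensor_msgs.msg import CameraInfo

if __name__ == "__main__":
    rospy.init_node("lookup_camera_calibration")

    # Pose of the camera optical frame in base_footprint (same frames as the
    # tf_echo command above); lookupTransform returns ([x, y, z], [qx, qy, qz, qw])
    listener = tf.TransformListener()
    listener.waitForTransform("/base_footprint",
                              "/depthcam_proximity_front_depth_optical_frame",
                              rospy.Time(0), rospy.Duration(5.0))
    translation, quaternion = listener.lookupTransform(
        "/base_footprint",
        "/depthcam_proximity_front_depth_optical_frame",
        rospy.Time(0))

    # Intrinsics from the CameraInfo message (K is a row-major 3x3 stored as 9 floats)
    info = rospy.wait_for_message("/depthcam_proximity_front/depth/camera_info",
                                  CameraInfo)
    K = np.matrix(info.K).reshape(3, 3)

    print(translation, quaternion, K)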