import cv2
import numpy as np


def transformPixelBetweenCameras(pixelLocationInSrcFrame,
                                 depth,
                                 srcCameraMatrix,
                                 srcCameraDistortionCoefficients,
                                 srcCameraRVec,
                                 srcCameraTVec,
                                 dstCameraMatrix,
                                 dstCameraDistortionCoefficients,
                                 dstCameraRVec,
                                 dstCameraTVec):
    # Static camera was too close to the board, but some depth values are still above 6000,
    # so fall back to a default depth of 1000 mm when the reading is missing or out of range.
    if depth == 0 or depth > 1500:
        depth = 1000
    else:
        print("Using depth frame's depth: {}".format(depth))

    # First undistort the source image pixel location. Passing P=srcCameraMatrix keeps the
    # result in pixel coordinates instead of normalized coordinates.
    srcPixel = np.array([[pixelLocationInSrcFrame]], dtype=np.float64)
    undistortedPixel = cv2.undistortPoints(srcPixel, srcCameraMatrix,
                                           srcCameraDistortionCoefficients,
                                           P=srcCameraMatrix)
    srcFrameHomogeneousPixelLoc = np.array([undistortedPixel[0, 0, 0],
                                            undistortedPixel[0, 0, 1],
                                            1.0], dtype=np.float64).reshape((3, 1))

    # Compute R and T for the source camera, and their inverses (camera -> world).
    srcCameraR, _ = cv2.Rodrigues(srcCameraRVec)
    srcCameraT = srcCameraTVec
    srcCameraRInv = srcCameraR.transpose()
    srcCameraTInv = np.matmul(srcCameraRInv, -srcCameraT)

    # Back-project the pixel through the inverse camera matrix, then use Rinv and Tinv
    # to find the XYZ location, scaled by the measured depth (millimetres to metres).
    srcCameraMatrixInv = np.linalg.inv(srcCameraMatrix)
    srcCameraCoordinates = np.matmul(srcCameraMatrixInv, srcFrameHomogeneousPixelLoc)
    xyzPoint = np.matmul(srcCameraRInv, srcCameraCoordinates).transpose() + srcCameraTInv.transpose()
    xyzPoint = xyzPoint * depth / 1000.0

    # The reverse, projecting xyzPoint to a pixel in the destination frame,
    # can be done with a single OpenCV call.
    dstImagePoints, _ = cv2.projectPoints(xyzPoint,
                                          dstCameraRVec,
                                          dstCameraTVec,
                                          dstCameraMatrix,
                                          dstCameraDistortionCoefficients)
    return dstImagePoints
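In summary, the function back-projects the undistorted pixel through the inverse intrinsics, moves it into world coordinates with the inverse extrinsics, scales it by the measured depth, and reprojects it into the destination camera. A minimal standalone sketch of just the back-projection step (pinhole model, no distortion; the helper name and millimetre depth units are assumptions, and the depth scaling is applied after the translation, matching the ordering used in the function above):

import numpy as np

def backProjectPixel(u, v, depth_mm, K, R, t):
    # Ray through pixel (u, v) in camera coordinates: x_cam = K^-1 * [u, v, 1]^T
    ray = np.matmul(np.linalg.inv(K), np.array([u, v, 1.0]))
    # Move into world coordinates with the inverse extrinsics (R^T, -R^T t),
    # then scale by depth in metres.
    X = (np.matmul(R.T, ray) + np.matmul(R.T, -t).ravel()) * depth_mm / 1000.0
    return X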
Call it as:

# Take (0, 0, 0) as the world point
axisPoints = np.array([[0.0, 0.0, 0.0]])
# Find its location in the static camera's image
axisStaticPixelLocation, _ = cv2.projectPoints(axisPoints, rvec_static, tvec_static, rgbd_camera_matrix,
                                               rgbd_dist_coeffs)
pixelLocationInStaticFrame = (int(axisStaticPixelLocation[0][0][0]), int(axisStaticPixelLocation[0][0][1]))

# Look up the depth at that pixel; note that the pixel is (x, y) while the
# depth image is indexed [row, col], i.e. [y, x].
if pixelLocationInStaticFrame[0] < input_img_static_gray.shape[1] and \
        pixelLocationInStaticFrame[1] < input_img_static_gray.shape[0]:
    depth = depth_img[pixelLocationInStaticFrame[1]][pixelLocationInStaticFrame[0]]
else:
    depth = 0

# Call the function above with pixelLocationInStaticFrame
pixelLocationInDynamicFrame = transformPixelBetweenCameras(pixelLocationInStaticFrame,
                                                           depth,
                                                           rgbd_camera_matrix,
                                                           rgbd_dist_coeffs,
                                                           rvec_static,
                                                           tvec_static,
                                                           rgb_camera_matrix,
                                                           rgb_dist_coeffs,
                                                           rvec_dynamic,
                                                           tvec_dynamic)
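To sanity-check the mapping visually, the returned point can be drawn onto the dynamic camera's frame. A minimal sketch, assuming `input_img_dynamic` holds the dynamic camera image (that variable name is a placeholder, not from the gist):

# cv2.projectPoints returns an array of shape (N, 1, 2); unpack the single point.
u, v = pixelLocationInDynamicFrame[0][0]
cv2.circle(input_img_dynamic, (int(round(u)), int(round(v))), 5, (0, 0, 255), -1)
cv2.imshow("dynamic frame", input_img_dynamic)
cv2.waitKey(0)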
