SolvePnp tvec & rvec into object-space rot & trans for the camera pose
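// Context (sketch, assumptions): RotationVec & TranslationVec are assumed to be the rvec/tvec
// outputs of cv::solvePnP, roughly like the commented call below; the point lists and the
// CameraMatrix/DistCoeffs names are hypothetical placeholders.
//	std::vector<cv::Point3f> ObjectPoints;	// known 3D feature points in object space
//	std::vector<cv::Point2f> ImagePoints;	// their detected 2D positions in the image
//	cv::Mat CameraMatrix, DistCoeffs;		// intrinsics (identity/zero if the image points are pre-normalised)
//	cv::Mat RotationVec, TranslationVec;	// CV_64F (double) by default
//	cv::solvePnP( ObjectPoints, ImagePoints, CameraMatrix, DistCoeffs, RotationVec, TranslationVec );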
// the vectors from SolvePnp form the matrix that places the object in camera space
// therefore the matrix is ObjectToView / ObjectTo[CameraObject] (in -1..1 normalised-frustum space, as SolvePnp applies no projection)
// object transform in camera space
auto ObjectTranslation = TranslationVec;
cv::Mat ObjectRotation;
cv::Rodrigues(RotationVec, ObjectRotation);
cv::Mat ObjectToCamera = cv::Mat::eye(4, 4, CV_64F);
ObjectRotation.copyTo( ObjectToCamera.rowRange(0, 3).colRange(0, 3) );
ObjectTranslation.copyTo( ObjectToCamera.rowRange(0, 3).col(3) );
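// sanity-check sketch (hypothetical names): ObjectToCamera maps homogeneous object-space points
// into camera space; the object origin lands at the translation vector, e.g.
//	cv::Mat ObjectOrigin = (cv::Mat_<double>(4,1) << 0, 0, 0, 1);
//	cv::Mat OriginInCamera = ObjectToCamera * ObjectOrigin;	// top 3 rows == ObjectTranslation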
// we want the inverse, to get Camera position relative to object[space]
// https://github.com/fta2012/WiimotePositionTrackingDemo/blob/master/demo.cpp#L207
// "Find the inverse of the extrinsic matrix (should be the same as just calling extrinsic.inv())"
// "inverse of a rotational matrix is its transpose"
cv::Mat CameraRotation = ObjectRotation.inv();
// un-rotate the translation to get the camera pos in object space
// note: the textbook inverse of [R|t] is [R^T | -R^T*t]; this deliberately skips that full negation
// gr: DON'T do -mtx, which just negates everything and makes things more confusing
// (only the X axis gets flipped further down to match our engine)
cv::Mat CameraTranslation = CameraRotation * ObjectTranslation;
// camera space to object space (cameralocal->objectlocal)
cv::Mat CameraToObject = cv::Mat::eye(4, 4, CV_64F);
CameraRotation.copyTo( CameraToObject.rowRange(0, 3).colRange(0, 3) );
CameraTranslation.copyTo( CameraToObject.rowRange(0, 3).col(3) );
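// usage sketch (hypothetical names): transforming the camera-local origin by CameraToObject
// gives the camera position in object space
//	cv::Mat CameraOrigin = (cv::Mat_<double>(4,1) << 0, 0, 0, 1);
//	cv::Mat CameraPosInObject = CameraToObject * CameraOrigin;	// top 3 rows == CameraTranslation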
// now that we don't negate everything, X is the only axis that's backwards for our engine
// gr: but why do we not invert the rotation...
// keep this as doubles to match the CV_64F mats above, otherwise the multiply below hits a type mismatch
double NegateXAxisMatrix[] =
{
-1,0,0,
0,1,0,
0,0,1,
};
cv::Mat NegateXAxisMat( 3, 3, CV_64F, NegateXAxisMatrix );
CameraTranslation = NegateXAxisMat * CameraTranslation;
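// sketch (assumption about the engine's handedness convention): if the axis flip ever needs to
// apply to the rotation as well, the usual approach is to conjugate it rather than just pre-multiply:
//	CameraRotation = NegateXAxisMat * CameraRotation * NegateXAxisMat;
// (shown commented out; the code below still only flips the translation)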
// gr: let's output separate things instead of one matrix...
// the mats are CV_64F (doubles), so read with at<double> and narrow to float for the output arrays
BufferArray<float,3> PosArray;
PosArray.PushBack( static_cast<float>( CameraTranslation.at<double>(0) ) );
PosArray.PushBack( static_cast<float>( CameraTranslation.at<double>(1) ) );
PosArray.PushBack( static_cast<float>( CameraTranslation.at<double>(2) ) );
// rotation is pushed column by column (column-major order)
BufferArray<float,3*3> RotArray;
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(0,0) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(1,0) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(2,0) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(0,1) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(1,1) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(2,1) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(0,2) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(1,2) ) );
RotArray.PushBack( static_cast<float>( CameraRotation.at<double>(2,2) ) );
auto Output = Params.mLocalContext.mGlobalContext.CreateObjectInstance( Params.mLocalContext );
Output.SetArray("Translation", GetArrayBridge(PosArray) );
Output.SetArray("Rotation", GetArrayBridge(RotArray) );
Params.Return(Output);