@cashiwamochi
Last active September 16, 2023 14:19
This code performs simple two-view triangulation. It uses OpenCV's findEssentialMat, recoverPose, and triangulatePoints, with PCL as the viewer, so you can inspect the 3D points and the two camera poses. I tested it with alcatraz1.jpg and alcatraz2.jpg from pcv_data.zip (https://www.oreilly.co.jp/pub/9784873116075/). Perhaps there is something wrong (actually it looks like it is working…
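// Pipeline:
//   1. detect AKAZE keypoints in both images and match them with a mutual ratio test
//   2. estimate the essential matrix E with findEssentialMat (RANSAC)
//   3. recover the relative camera pose (R, t) with recoverPose
//   4. triangulate the inlier matches with triangulatePoints
//   5. show the 3D points and both camera poses in a PCL viewer
//
// Usage: ./this.out /path/to/image1 /path/to/image2
// Intermediate results are written to match-result.png, inlier_match_points.png
// and triangulatedPoints.png.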
#include <opencv2/opencv.hpp>
#include <pcl/common/common_headers.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <Eigen/Core>
#include <Eigen/LU>
#include <Eigen/Geometry>
using namespace std;
int main(int argc, char* argv[]) {
  if(argc != 3) {
    cout << "usage: this.out [/path/to/image1] [/path/to/image2]" << endl;
    return -1;
  }
  cv::Mat image1 = cv::imread(argv[1]);
  cv::Mat image2 = cv::imread(argv[2]);
  if(image1.empty() or image2.empty()) {
    cout << "failed to load image(s)" << endl;
    return -1;
  }

  // Camera intrinsic parameter matrix.
  // The camera is not calibrated, so the focal length and principal point
  // below are rough guesses rather than measured values.
  cv::Mat K = (cv::Mat_<float>(3,3) << 500.f, 0.f, image1.cols / 2.f,
                                       0.f, 500.f, image1.rows / 2.f,
                                       0.f, 0.f, 1.f);
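  // K follows the standard pinhole model:
  //   [fx  0 cx]
  //   [ 0 fy cy]
  //   [ 0  0  1]
  // where fx and fy are focal lengths in pixels and (cx, cy) is the
  // principal point, here assumed to be the image center.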
  vector<cv::KeyPoint> kpts_vec1, kpts_vec2;
  cv::Mat desc1, desc2;
  cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
  // extract feature points and compute descriptors
  akaze->detectAndCompute(image1, cv::noArray(), kpts_vec1, desc1);
  akaze->detectAndCompute(image2, cv::noArray(), kpts_vec2, desc2);

  // AKAZE's default MLDB descriptor is binary, so Hamming distance is the
  // appropriate metric (NORM_L2 is intended for float descriptors).
  // The cross-check flag is set to false because a mutual ratio test is done below.
  cv::Ptr<cv::BFMatcher> matcher = cv::BFMatcher::create(cv::NORM_HAMMING, false);
  vector< vector<cv::DMatch> > matches_2nn_12, matches_2nn_21;
  matcher->knnMatch(desc1, desc2, matches_2nn_12, 2);
  matcher->knnMatch(desc2, desc1, matches_2nn_21, 2);
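  // Mutual ratio test: a pair survives only if Lowe's ratio test passes in both
  // directions (best distance < 0.8 * second-best) and the best 2->1 match of
  // the best 1->2 match points back to the same keypoint.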
  const double ratio = 0.8;
  vector<cv::Point2f> selected_points1, selected_points2;
  for(size_t i = 0; i < matches_2nn_12.size(); i++) { // i is queryIdx
    if(matches_2nn_12[i].size() < 2 or
       matches_2nn_21[matches_2nn_12[i][0].trainIdx].size() < 2)
      continue; // knnMatch can return fewer than 2 neighbours
    if( matches_2nn_12[i][0].distance/matches_2nn_12[i][1].distance < ratio
        and
        matches_2nn_21[matches_2nn_12[i][0].trainIdx][0].distance
          / matches_2nn_21[matches_2nn_12[i][0].trainIdx][1].distance < ratio )
    {
      if(matches_2nn_21[matches_2nn_12[i][0].trainIdx][0].trainIdx
         == matches_2nn_12[i][0].queryIdx)
      {
        selected_points1.push_back(kpts_vec1[matches_2nn_12[i][0].queryIdx].pt);
        selected_points2.push_back(
          kpts_vec2[matches_2nn_21[matches_2nn_12[i][0].trainIdx][0].queryIdx].pt
        );
      }
    }
  }
  if(true) { // save a visualization of the surviving matches
    cv::Mat src;
    cv::hconcat(image1, image2, src);
    for(size_t i = 0; i < selected_points1.size(); i++) {
      cv::line(src, selected_points1[i],
               cv::Point2f(selected_points2[i].x + image1.cols, selected_points2[i].y),
               cv::Scalar(0, 255, 0), 1);
    }
    cv::imwrite("match-result.png", src);
  }
  cv::Mat Kd;
  K.convertTo(Kd, CV_64F);

  cv::Mat mask; // unsigned char array; 1 marks an inlier
  cv::Mat E = cv::findEssentialMat(selected_points1, selected_points2,
                                   Kd.at<double>(0,0),
                                   cv::Point2d(image1.cols/2., image1.rows/2.),
                                   cv::RANSAC, 0.999, 1.0, mask);
  // E is CV_64F, not 32F
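  // findEssentialMat fits E to the epipolar constraint x2^T * E * x1 = 0 with
  // RANSAC (confidence 0.999, 1-pixel threshold); points judged to be outliers
  // get a 0 in mask.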
  vector<cv::Point2f> inlier_match_points1, inlier_match_points2;
  for(int i = 0; i < mask.rows; i++) {
    if(mask.at<unsigned char>(i)) {
      inlier_match_points1.push_back(selected_points1[i]);
      inlier_match_points2.push_back(selected_points2[i]);
    }
  }
  if(true) { // save a visualization of the RANSAC inliers
    cv::Mat src;
    cv::hconcat(image1, image2, src);
    for(size_t i = 0; i < inlier_match_points1.size(); i++) {
      cv::line(src, inlier_match_points1[i],
               cv::Point2f(inlier_match_points2[i].x + image1.cols,
                           inlier_match_points2[i].y),
               cv::Scalar(0, 255, 0), 1);
    }
    cv::imwrite("inlier_match_points.png", src);
  }
  mask.release();
  cv::Mat R, t;
  cv::recoverPose(E,
                  inlier_match_points1,
                  inlier_match_points2,
                  R, t, Kd.at<double>(0,0),
                  cv::Point2d(image1.cols/2., image1.rows/2.),
                  mask);
  // R and t are CV_64F, not 32F
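  // recoverPose decomposes E into its four possible (R, t) candidates and keeps
  // the one that places the triangulated points in front of both cameras (the
  // cheirality check); mask now marks the points that pass this check. Note
  // that t is a unit vector, so the reconstruction is defined only up to scale.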
  vector<cv::Point2d> triangulation_points1, triangulation_points2;
  for(int i = 0; i < mask.rows; i++) {
    if(mask.at<unsigned char>(i)) {
      triangulation_points1.push_back(
          cv::Point2d((double)inlier_match_points1[i].x, (double)inlier_match_points1[i].y));
      triangulation_points2.push_back(
          cv::Point2d((double)inlier_match_points2[i].x, (double)inlier_match_points2[i].y));
    }
  }
  if(true) { // save a visualization of the points used for triangulation
    cv::Mat src;
    cv::hconcat(image1, image2, src);
    for(size_t i = 0; i < triangulation_points1.size(); i++) {
      cv::line(src, triangulation_points1[i],
               cv::Point2d(triangulation_points2[i].x + image1.cols,
                           triangulation_points2[i].y),
               cv::Scalar(0, 255, 0), 1);
    }
    cv::imwrite("triangulatedPoints.png", src);
  }
  // first camera at the world origin: Rt0 = [I | 0], second camera: Rt1 = [R | t]
  cv::Mat Rt0 = cv::Mat::eye(3, 4, CV_64FC1);
  cv::Mat Rt1 = cv::Mat::eye(3, 4, CV_64FC1);
  R.copyTo(Rt1.rowRange(0,3).colRange(0,3));
  t.copyTo(Rt1.rowRange(0,3).col(3));
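  // The projection matrices passed to triangulatePoints are P0 = Kd * [I | 0]
  // and P1 = Kd * [R | t]; triangulation then finds, for each correspondence,
  // the 3D point whose projections best agree with both observations.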
  cv::Mat point3d_homo;
  cv::triangulatePoints(Kd * Rt0, Kd * Rt1,
                        triangulation_points1, triangulation_points2,
                        point3d_homo);
  // point3d_homo is CV_64F
  // the accepted input types are discussed here:
  // https://stackoverflow.com/questions/16295551/how-to-correctly-use-cvtriangulatepoints
  assert(point3d_homo.cols == (int)triangulation_points1.size());
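  // Each column of point3d_homo is a homogeneous 4-vector (x, y, z, w); the
  // Euclidean point is (x/w, y/w, z/w), which convertPointsFromHomogeneous
  // computes in the loop below.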
  // prepare a viewer
  pcl::visualization::PCLVisualizer viewer("Viewer");
  viewer.setBackgroundColor(1.0, 1.0, 1.0); // PCL expects RGB components in [0, 1]

  // create a point cloud from the triangulated points
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
  cloud->points.resize(point3d_homo.cols);
  for(int i = 0; i < point3d_homo.cols; i++) {
    pcl::PointXYZRGB &point = cloud->points[i];
    cv::Mat p3d;
    cv::Mat _p3h = point3d_homo.col(i);
    cv::convertPointsFromHomogeneous(_p3h.t(), p3d);
    point.x = p3d.at<double>(0);
    point.y = p3d.at<double>(1);
    point.z = p3d.at<double>(2);
    point.r = 0;
    point.g = 0;
    point.b = 255; // draw every point in blue
  }
  viewer.addPointCloud(cloud, "Triangulated Point Cloud");
  viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE,
                                          3,
                                          "Triangulated Point Cloud");
  // coordinate axes of the first camera (the world origin)
  viewer.addCoordinateSystem(1.0);
  // add the second camera pose
  Eigen::Matrix4f eig_mat;
  Eigen::Affine3f cam_pose;
  R.convertTo(R, CV_32F);
  t.convertTo(t, CV_32F);
  // invert the view transform to get where the camera moved to
  cv::Mat Rinv = R.t();
  cv::Mat T = -Rinv * t;
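  // recoverPose gives the transform from camera-1 to camera-2 coordinates
  // (x_cam2 = R * x_cam1 + t), so the second camera's pose in the world frame
  // of camera 1 is the inverse: orientation R^T and center C = -R^T * t.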
  eig_mat(0,0) = Rinv.at<float>(0,0); eig_mat(0,1) = Rinv.at<float>(0,1); eig_mat(0,2) = Rinv.at<float>(0,2);
  eig_mat(1,0) = Rinv.at<float>(1,0); eig_mat(1,1) = Rinv.at<float>(1,1); eig_mat(1,2) = Rinv.at<float>(1,2);
  eig_mat(2,0) = Rinv.at<float>(2,0); eig_mat(2,1) = Rinv.at<float>(2,1); eig_mat(2,2) = Rinv.at<float>(2,2);
  eig_mat(3,0) = 0.f;                 eig_mat(3,1) = 0.f;                 eig_mat(3,2) = 0.f;
  eig_mat(0,3) = T.at<float>(0);
  eig_mat(1,3) = T.at<float>(1);
  eig_mat(2,3) = T.at<float>(2);
  eig_mat(3,3) = 1.f;
  cam_pose = eig_mat;
  // cam_pose has to be Affine3f; Affine3d cannot be used here
  viewer.addCoordinateSystem(1.0, cam_pose, "2nd cam");
  viewer.initCameraParameters();
  while(!viewer.wasStopped()) {
    viewer.spinOnce(100); // process viewer events until the window is closed
  }
  return 0;
}