@dotchang
Created February 14, 2014 05:08
#include "opencv2/opencv.hpp"
#include "opencv2/nonfree/nonfree.hpp"
//http://blog.livedoor.jp/hen_cyberne/archives/51072839.html
//http://whoopsidaisies.hatenablog.com/entry/2013/12/07/135810
using namespace cv;
using namespace std;
#if _DEBUG
#pragma comment(lib, "opencv_core248d.lib")
#pragma comment(lib, "opencv_highgui248d.lib")
#pragma comment(lib, "opencv_imgproc248d.lib")
#pragma comment(lib, "opencv_contrib248d.lib")
#pragma comment(lib, "opencv_calib3d248d.lib")
#pragma comment(lib, "opencv_features2d248d.lib")
#pragma comment(lib, "opencv_nonfree248d.lib")
#else
#pragma comment(lib, "opencv_core248.lib")
#pragma comment(lib, "opencv_highgui248.lib")
#pragma comment(lib, "opencv_imgproc248.lib")
#pragma comment(lib, "opencv_contrib248.lib")
#pragma comment(lib, "opencv_calib3d248.lib")
#pragma comment(lib, "opencv_features2d248.lib")
#pragma comment(lib, "opencv_nonfree248.lib")
#endif
void feature_matching (vector<KeyPoint>& keypoints1, vector<KeyPoint>& keypoints2, std::vector<cv::DMatch>& dmatch, Mat& img1, Mat& img2)
{
//Adapter prefixes: Grid, Pyramid (e.g. "GridSURF", "PyramidSURF")
//FAST,FASTX,STAR,SIFT,SURF,ORB,BRISK,MSER,GFTT,HARRIS,Dense,SimpleBlob
const std::string featureDetectorName = "SURF";
//Adapter prefix: Opponent (e.g. "OpponentSURF")
//SIFT,SURF,BRIEF,BRISK,ORB,FREAK
const std::string descriptorExtractorName = "SURF";
//SIFT, SURF: float descriptors, Euclidean (L2) distance
//BRIEF, ORB, FREAK: binary descriptors (.type() == CV_8U), Hamming distance
//BruteForce,BruteForce-L1,BruteForce-SL2,BruteForce-Hamming,BruteForce-Hamming(2),FlannBased
const std::string descriptorMatcherName = "FlannBased";
bool crossCheck = false; // set to true to enable cross-check filtering of matches
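// Note (not in the original gist): "FlannBased" assumes float descriptors such as
// SIFT/SURF. If a binary extractor (BRIEF, ORB, BRISK, FREAK) were chosen above,
// the matcher should use Hamming distance instead; a minimal sketch:
// const std::string descriptorExtractorName = "ORB";
// const std::string descriptorMatcherName = "BruteForce-Hamming";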
// Initialize the nonfree (SIFT/SURF) module
cv::initModule_nonfree();
// Keypoint detection
cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create(featureDetectorName);
detector->detect(img1, keypoints1);
detector->detect(img2, keypoints2);
// Descriptor extraction
cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create(descriptorExtractorName);
cv::Mat descriptor1, descriptor2;
extractor->compute(img1, keypoints1, descriptor1);
extractor->compute(img2, keypoints2, descriptor2);
// Matching
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create(descriptorMatcherName);
if (crossCheck)
{
// With cross-checking: keep only mutually best matches
std::vector<cv::DMatch> match12, match21;
matcher->match(descriptor1, descriptor2, match12);
matcher->match(descriptor2, descriptor1, match21);
for (size_t i = 0; i < match12.size(); i++)
{
cv::DMatch forward = match12[i];
cv::DMatch backward = match21[forward.trainIdx];
if (backward.trainIdx == forward.queryIdx)
dmatch.push_back(forward);
}
}
else
{
// Without cross-checking: keep every nearest-neighbour match
matcher->match(descriptor1, descriptor2, dmatch);
}
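// Alternative outlier filter (not used in this gist): Lowe's ratio test via knnMatch.
// A minimal sketch, assuming the same matcher and descriptors as above:
// std::vector<std::vector<cv::DMatch> > knnMatches;
// matcher->knnMatch(descriptor1, descriptor2, knnMatches, 2);
// for (size_t i = 0; i < knnMatches.size(); i++) {
//     if (knnMatches[i].size() == 2 &&
//         knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance)
//         dmatch.push_back(knnMatches[i][0]);
// }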
#if 1
// Display the matching result
cv::Mat out;
cv::drawMatches(img1, keypoints1, img2, keypoints2, dmatch, out);
cv::imshow("matching", out);
waitKey(1);
//while (cv::waitKey(1) == -1);
#endif
}
void rectify_test(int argc, char *argv[])
{
Mat m_img1 = imread ( "right01.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat m_img2 = imread ( "left01.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if(m_img1.empty() || m_img2.empty()) return;
vector<Point2f> m_imgPoints1;
vector<Point2f> m_imgPoints2;
vector<DMatch> m_matches;
vector<KeyPoint> m_keypoints1, m_keypoints2;
Mat dst1, dst2; // rectified output images
Mat F, H1, H2; // fundamental matrix and the rectifying homographies
Mat ptMat1, ptMat2; // matrices holding the corresponding points
vector<uchar> mask; // inlier mask (unused here)
// surf matching
feature_matching (m_keypoints1, m_keypoints2, m_matches, m_img1, m_img2);
// collect the matched point pairs (query -> train)
m_imgPoints1.resize ( m_matches.size() );
m_imgPoints2.resize ( m_matches.size() );
for ( size_t i = 0; i < m_matches.size(); ++i )
{
int i1 = m_matches[i].queryIdx;
int i2 = m_matches[i].trainIdx;
m_imgPoints1[i] = m_keypoints1[i1].pt;
m_imgPoints2[i] = m_keypoints2[i2].pt;
}
ptMat1 = Mat ( m_imgPoints1 );
ptMat2 = Mat ( m_imgPoints2 );
// fundam.cpp ... line:1108, line: 1073
F = findFundamentalMat(ptMat1, ptMat2, cv::FM_RANSAC); //CV_FM_7POINT, CV_FM_8POINT, cv::FM_RANSAC, CV_FM_LMEDS
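// The RANSAC inlier mask could be used to discard outlier correspondences before
// rectification. A minimal sketch reusing the 'mask' vector declared above
// (an assumption; the original gist does not do this):
// F = findFundamentalMat(ptMat1, ptMat2, cv::FM_RANSAC, 3.0, 0.99, mask);
// vector<Point2f> in1, in2;
// for (size_t i = 0; i < mask.size(); ++i)
//     if (mask[i]) { in1.push_back(m_imgPoints1[i]); in2.push_back(m_imgPoints2[i]); }
// ptMat1 = Mat(in1); ptMat2 = Mat(in2);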
stereoRectifyUncalibrated(ptMat1, ptMat2, F, m_img1.size(), H1, H2);
dst1 = Mat ( m_img1.size(), m_img1.type() );
dst2 = Mat ( m_img2.size(), m_img2.type() );
// With known intrinsics, use cv::initUndistortRectifyMap instead (see the commented sketch below the warps)
warpPerspective (m_img1, dst1, H1, dst1.size() );
warpPerspective (m_img2, dst2, H2, dst2.size() );
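// Calibrated-rectification sketch (not part of the original gist). Assumes K1, D1,
// K2, D2, R, T come from a prior stereo calibration step; these names are hypothetical
// placeholders.
// cv::Mat R1, R2, P1, P2, Q, map1x, map1y, map2x, map2y;
// cv::stereoRectify(K1, D1, K2, D2, m_img1.size(), R, T, R1, R2, P1, P2, Q);
// cv::initUndistortRectifyMap(K1, D1, R1, P1, m_img1.size(), CV_32FC1, map1x, map1y);
// cv::initUndistortRectifyMap(K2, D2, R2, P2, m_img2.size(), CV_32FC1, map2x, map2y);
// cv::remap(m_img1, dst1, map1x, map1y, cv::INTER_LINEAR);
// cv::remap(m_img2, dst2, map2x, map2y, cv::INTER_LINEAR);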
imwrite ( "rectfied_1.png", dst1 );
imwrite ( "rectfied_2.png", dst2 );
imshow("rectified1", dst1);
imshow("rectified2", dst2);
waitKey();
}
int main(int argc, char* argv[])
{
rectify_test(argc, argv);
return 0;
}