beblid-demo-code
#include <iostream>
#include <opencv2/opencv.hpp>
#include "BEBLID.h"
int main() {
  // Read the input images in grayscale format (CV_8UC1)
  cv::Mat img1 = cv::imread("../imgs/img1.jpg", cv::IMREAD_GRAYSCALE);
  cv::Mat img2 = cv::imread("../imgs/img3.jpg", cv::IMREAD_GRAYSCALE);
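  // Guard added for robustness (not in the original gist): abort early if
  // either image failed to load, since the paths are relative to the build dir
  if (img1.empty() || img2.empty()) {
    std::cerr << "Error: could not read the input images" << std::endl;
    return -1;
  }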
  // Create the feature detector, for example ORB
  auto detector = cv::ORB::create();
  // Detect features in both images
  std::vector<cv::KeyPoint> points1, points2;
  detector->detect(img1, points1);
  detector->detect(img2, points2);
  std::cout << "Detected " << points1.size() << " kps in image1" << std::endl;
  std::cout << "Detected " << points2.size() << " kps in image2" << std::endl;
  // Create the BEBLID descriptor: 256 bits (32 bytes) per descriptor, with a
  // 0.75 scale factor that adapts the sampling window to ORB keypoints
  auto descriptor = BEBLID::create(256, 0.75);
  // Describe the detected features in both images
  cv::Mat descriptors1, descriptors2;
  descriptor->compute(img1, points1, descriptors1);
  descriptor->compute(img2, points2, descriptors2);
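  // Each row of descriptors1/descriptors2 is one binary descriptor packed
  // into 32 bytes (256 bits), which is why Hamming distance is used below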
std::cout << "Points described" << std::endl;
  // Match the generated descriptors for img1 and img2 using brute force matching
  cv::BFMatcher matcher(cv::NORM_HAMMING, true);
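  // (The second argument enables cross-checking: only mutual nearest-neighbor
  // matches are kept, trading some recall for higher precision)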
  std::vector<cv::DMatch> matches;
  matcher.match(descriptors1, descriptors2, matches);
  std::cout << "Number of matches: " << matches.size() << std::endl;
  // If there are not enough matches to estimate a homography, exit
  if (matches.size() < 4) return -1;
  // Take only the matched points that will be used to calculate the
  // transformation between both images
  std::vector<cv::Point2d> matched_pts1, matched_pts2;
  for (const cv::DMatch &match : matches) {
    matched_pts1.push_back(points1[match.queryIdx].pt);
    matched_pts2.push_back(points2[match.trainIdx].pt);
  }
  // Find the homography that transforms a point in the first image to a point in the second image
  cv::Mat inliers;
  cv::Mat H = cv::findHomography(matched_pts1, matched_pts2, cv::RANSAC, 3, inliers);
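  // H is a 3x3 CV_64F matrix and inliers is an Nx1 CV_8U mask with a non-zero
  // entry for every match that agrees with H within the 3-pixel RANSAC threshold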
  // Print the number of inliers, that is, the number of points correctly
  // mapped by the transformation that we have estimated
  std::cout << "Number of inliers " << cv::sum(inliers)[0]
            << " ( " << (100.0f * cv::sum(inliers)[0] / matches.size()) << "% )" << std::endl;
  // Convert the images from grayscale to BGR format
  cv::cvtColor(img1, img1, cv::COLOR_GRAY2BGR);
  cv::cvtColor(img2, img2, cv::COLOR_GRAY2BGR);
  // Draw all the matched keypoints in red color
  cv::Mat all_matches_img;
  cv::drawMatches(img1, points1, img2, points2, matches,
                  all_matches_img, CV_RGB(255, 0, 0), CV_RGB(255, 0, 0));
  // Show and save the result
  cv::imshow("All matches", all_matches_img);
  cv::waitKey();
  return 0;
}