@yutaroyamanaka
Created July 4, 2020 14:28
2Dmatch.py
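# Usage (file names below are examples, not the gist's actual data):
#   python 2Dmatch.py --input1 object.png --input2 scene.mp4 --input3 ad.png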
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to the object image.', default='box.png')
parser.add_argument('--input2', help='Path to the scene video.', default='box_in_scene.png')
parser.add_argument('--input3', help='Path to the advertisement image.', default='box_in_scene.png')
args = parser.parse_args()
img_object = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
print(img_object.shape)
cap = cv.VideoCapture(cv.samples.findFile(args.input2))
fmt = cv.VideoWriter_fourcc('m', 'p', '4', 'v') # file format (mp4 here)
out = cv.VideoWriter('output.mp4',fmt, 30.0, (1920*2, 1080))
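# NOTE: the (1920*2, 1080) output size assumes 1080p scene frames placed next to a
# 1920-pixel-wide object image; adjust it if your inputs differ.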
img_adv = cv.imread(cv.samples.findFile(args.input3), cv.IMREAD_GRAYSCALE)
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        img_scene = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        #-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
        minHessian = 400
        detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
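        # Note: SURF is patented; cv.xfeatures2d_SURF needs an opencv-contrib build
        # with the nonfree modules enabled.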
        keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
        keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)
        #-- Step 2: Matching descriptor vectors with a FLANN based matcher
        # Since SURF is a floating-point descriptor, NORM_L2 is used
        matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
        knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
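        # knnMatch returns the two nearest scene descriptors for every object
        # descriptor, which is exactly what the ratio test below needs.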
        #-- Filter matches using Lowe's ratio test
        ratio_thresh = 0.75
        good_matches = []
        for m, n in knn_matches:
            if m.distance < ratio_thresh * n.distance:
                good_matches.append(m)
        print(len(good_matches))
        img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1] + img_scene.shape[1], 3), dtype=np.uint8)
        cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
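        # img_matches now shows the object image (left) and the current frame (right)
        # with the good matches drawn between them.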
        obj = np.empty((len(good_matches), 2), dtype=np.float32)
        scene = np.empty((len(good_matches), 2), dtype=np.float32)
        for i in range(len(good_matches)):
            #-- Get the keypoints from the good matches
            obj[i, 0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
            obj[i, 1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
            scene[i, 0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
            scene[i, 1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
        H, _ = cv.findHomography(obj, scene, cv.RANSAC)
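        # H maps object-image coordinates into the current frame (RANSAC discards
        # outlier matches while estimating it).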
        target_corners = np.empty((4, 1, 2), dtype=np.float32)
        target_corners[0, 0, 0] = 210
        target_corners[0, 0, 1] = 400
        target_corners[1, 0, 0] = 910   # 700
        target_corners[1, 0, 1] = 400
        target_corners[2, 0, 0] = 910   # 700
        target_corners[2, 0, 1] = 1200  # 2700
        target_corners[3, 0, 0] = 210   # 2900
        target_corners[3, 0, 1] = 1200  # 2700
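        # Hand-picked rectangle, in object-image coordinates, marking where the
        # advertisement should land; the commented values look like earlier attempts.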
        picture_corners = cv.perspectiveTransform(target_corners, H)
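        # picture_corners is that rectangle's position in the frame; it is computed
        # but never used below, presumably left in for debugging the placement.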
        perspective_adv_img = cv.warpPerspective(img_adv, H, (img_adv.shape[1], img_adv.shape[0]))
        cv.imwrite("adv-perspective.jpg", perspective_adv_img)
        perspective_adv_img = cv.imread("adv-perspective.jpg")
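        # The imwrite/imread round trip converts the warped grayscale ad into a
        # 3-channel BGR image; cv.cvtColor(perspective_adv_img, cv.COLOR_GRAY2BGR)
        # would do much the same without the disk I/O (and without JPEG artifacts).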
        isBlack = perspective_adv_img[:, :, :] == 0
        print("isBlack", isBlack.shape)
        print(img_matches.shape)
        isSeen = perspective_adv_img[:, :, :] != 0
        addImg = perspective_adv_img * isSeen
        img_matches[:, img_object.shape[1]:, :] = img_matches[:, img_object.shape[1]:, :] * isBlack
        img_matches[:, img_object.shape[1]:, :] += addImg
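        # Composite onto the scene half of the canvas: keep scene pixels where the
        # warped ad is black, blank the rest, then add the ad pixels. This relies on
        # img_adv having the same size as the scene frames so the masks broadcast.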
        # write frame
        out.write(img_matches)
    else:
        break

cap.release()
out.release()