Skip to content

Instantly share code, notes, and snippets.

@phsamuel
Last active March 27, 2020 23:42
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save phsamuel/78a608b091b2221f552c7417ae27e809 to your computer and use it in GitHub Desktop.
Save phsamuel/78a608b091b2221f552c7417ae27e809 to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A test of OpenCV feature descriptors (please see https://youtu.be/qBw1OVd_i6Q)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Test on different feature descriptor in OpenCV\n",
"# Note that you need to install opencv-contrib-python besides opencv-python\n",
"# some versions of OpenCV do not work; 3.4.2.16 should work\n",
"# pip install --user opencv-python==3.4.2.16\n",
"# pip install --user opencv-contrib-python==3.4.2.16"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Show keypoints from the chosen OpenCV feature descriptor on live webcam frames.\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"CAMERA_INDEX = 1  # my webcam; change to 0 if you only have one camera\n",
"cap = cv2.VideoCapture(CAMERA_INDEX)\n",
"window_name = 'camera'\n",
"\n",
"cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)\n",
"cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n",
"\n",
"# Pick exactly one descriptor; SIFT/SURF need opencv-contrib-python (see above).\n",
"# desc = cv2.xfeatures2d.SIFT_create()\n",
"# desc = cv2.xfeatures2d.SURF_create()\n",
"# desc = cv2.KAZE_create()\n",
"# desc = cv2.AKAZE_create()\n",
"desc = cv2.BRISK_create()\n",
"\n",
"while True:\n",
"    ret, frame = cap.read()\n",
"    if not ret:  # camera read failed -- stop instead of crashing in cvtColor\n",
"        break\n",
"    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
"\n",
"    kps, descs = desc.detectAndCompute(frame, None)\n",
"    img2 = cv2.drawKeypoints(frame, kps, frame.copy(), color=(0, 255, 0), flags=0)\n",
"\n",
"    cv2.imshow(window_name, img2)\n",
"    # Click the video window first, then press q to quit.\n",
"    if cv2.waitKey(1) & 0xFF == ord('q'):\n",
"        break\n",
"\n",
"cap.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Try to match features of webcam frames with a pre-captured image. You will need to change the filename 'window.jpg'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Match live webcam frames against a pre-captured reference image.\n",
"# You will need to change the filename 'window.jpg' to your own picture.\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"CAMERA_INDEX = 1  # change to 0 if you only have one camera\n",
"cap = cv2.VideoCapture(CAMERA_INDEX)\n",
"window_name = 'camera'\n",
"\n",
"img1 = cv2.imread('window.jpg', cv2.IMREAD_GRAYSCALE)\n",
"if img1 is None:  # imread returns None instead of raising on a missing file\n",
"    raise FileNotFoundError('window.jpg not found -- capture a reference image first')\n",
"\n",
"cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)\n",
"cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n",
"\n",
"# SURF gives float descriptors, which is what the FLANN matcher below expects.\n",
"# Binary descriptors (AKAZE/BRISK) would need a Hamming-distance matcher instead.\n",
"# desc = cv2.xfeatures2d.SIFT_create()\n",
"desc = cv2.xfeatures2d.SURF_create()\n",
"\n",
"kps1, descs1 = desc.detectAndCompute(img1, None)\n",
"matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)\n",
"\n",
"while True:\n",
"    ret, frame = cap.read()\n",
"    if not ret:  # camera read failed -- stop instead of crashing in cvtColor\n",
"        break\n",
"    img2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
"\n",
"    kps, descs = desc.detectAndCompute(img2, None)\n",
"\n",
"    # Lowe's ratio test: keep a match only if it is clearly better than the runner-up.\n",
"    ratio_thresh = 0.6\n",
"    good_matches = []\n",
"    if descs is not None and len(descs) >= 2:  # knnMatch(k=2) needs >= 2 candidates\n",
"        knn_matches = matcher.knnMatch(descs1, descs, 2)\n",
"        for pair in knn_matches:\n",
"            if len(pair) == 2 and pair[0].distance < ratio_thresh * pair[1].distance:\n",
"                good_matches.append(pair[0])\n",
"\n",
"    # Draw the reference image and the live frame side by side with match lines.\n",
"    img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3), dtype=np.uint8)\n",
"    cv2.drawMatches(img1, kps1, img2, kps, good_matches, img_matches, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n",
"    cv2.imshow(window_name, img_matches)\n",
"    # Click the video window first, then press q to quit.\n",
"    if cv2.waitKey(1) & 0xFF == ord('q'):\n",
"        break\n",
"\n",
"cap.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Like the above, but also warp one of the image to match the other"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Like the matching demo above, but also estimate a homography with RANSAC\n",
"# and warp the reference image onto the current camera view.\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"CAMERA_INDEX = 1  # change to 0 if you only have one camera\n",
"cap = cv2.VideoCapture(CAMERA_INDEX)\n",
"window_name = 'camera'\n",
"\n",
"img1 = cv2.imread('window.jpg', cv2.IMREAD_GRAYSCALE)\n",
"if img1 is None:  # imread returns None instead of raising on a missing file\n",
"    raise FileNotFoundError('window.jpg not found -- capture a reference image first')\n",
"\n",
"cv2.namedWindow(window_name)\n",
"cv2.namedWindow('warp')\n",
"\n",
"# SURF gives float descriptors, which is what the FLANN matcher below expects.\n",
"desc = cv2.xfeatures2d.SURF_create()\n",
"\n",
"kps1, descs1 = desc.detectAndCompute(img1, None)\n",
"matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)\n",
"\n",
"MIN_MATCHES = 15  # comfortable margin over the 4 correspondences a homography needs\n",
"\n",
"while True:\n",
"    ret, frame = cap.read()\n",
"    if not ret:  # camera read failed -- stop instead of crashing in cvtColor\n",
"        break\n",
"    img2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
"\n",
"    kps, descs = desc.detectAndCompute(img2, None)\n",
"\n",
"    # Lowe's ratio test: keep a match only if it is clearly better than the runner-up.\n",
"    ratio_thresh = 0.6\n",
"    good_matches = []\n",
"    if descs is not None and len(descs) >= 2:  # knnMatch(k=2) needs >= 2 candidates\n",
"        knn_matches = matcher.knnMatch(descs1, descs, 2)\n",
"        for pair in knn_matches:\n",
"            if len(pair) == 2 and pair[0].distance < ratio_thresh * pair[1].distance:\n",
"                good_matches.append(pair[0])\n",
"\n",
"    if len(good_matches) > MIN_MATCHES:\n",
"        src_pts = np.array([kps1[m.queryIdx].pt for m in good_matches])\n",
"        dst_pts = np.array([kps[m.trainIdx].pt for m in good_matches])\n",
"        h, status = cv2.findHomography(src_pts, dst_pts, method=cv2.RANSAC)\n",
"        # Do NOT `continue` when h is None: that would skip cv2.waitKey below\n",
"        # and leave the window unresponsive to the q key.\n",
"        if h is not None:\n",
"            im_out = cv2.warpPerspective(img1, h, (img2.shape[1], img2.shape[0]))\n",
"            cv2.imshow('warp', im_out)\n",
"\n",
"    # Draw the reference image and the live frame side by side with match lines.\n",
"    img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3), dtype=np.uint8)\n",
"    cv2.drawMatches(img1, kps1, img2, kps, good_matches, img_matches, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n",
"    cv2.imshow(window_name, img_matches)\n",
"    # Click the video window first, then press q to quit.\n",
"    if cv2.waitKey(1) & 0xFF == ord('q'):\n",
"        break\n",
"\n",
"cap.release()\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment