Skip to content

Instantly share code, notes, and snippets.

@dixyes
Last active October 13, 2018 14:03
Show Gist options
  • Save dixyes/b9dcc3cdb60a4b747047f361ba1c74f9 to your computer and use it in GitHub Desktop.
Save dixyes/b9dcc3cdb60a4b747047f361ba1c74f9 to your computer and use it in GitHub Desktop.
截图抽奖

截图抽奖

使用了opencv的python接口

注意:使用了非自由的 sift/surf(需要自行编译带 nonfree 模块的 opencv-contrib-python 库)

效果

基本思路

  • 准备:
  • 准备识别的模板(截图抽奖大圆圈)data/tem2x.png
  • 准备奖品时间照片放在data/times/*.png
  • 计算抽奖目标区域相对于模板的位置和大小
  • 计算data/times/*.png的sift/surf,储存在字典
  • 计算data/tem2x.png的sift/surf
  • 实际计算
  • 用skimage.io读取输入文件/url
  • 计算输入的sift/surf
  • knn比对
  • 若比对点少于30视为未找到,放弃
  • 否则截取目标区域并计算sift/surf,分别与字典内的数据比对,取最大值为结果

参考

主要代码复制自opencv官网文档 https://docs.opencv.org/3.4.3/dc/dc3/tutorial_py_matcher.html

若干博客内容

维基百科

import os
import sys
import urllib.request

import cv2
import numpy as np
from skimage import io

# Root directory holding the template image and the prize-time images.
# Raw string keeps the same value as before while avoiding the invalid
# "\d" escape the original relied on (a SyntaxWarning on Python 3.12+).
dataPath = r"C:\to\your\datadir"
class AutoBanDectector():
    """Match a screenshot against the lottery-circle template and read the prize time.

    Built once in ``__init__``:
      * SIFT/SURF detectors (requires the non-free opencv-contrib build);
      * ``resDict``: prize-time label (file stem under ``data/times/``) ->
        precomputed SIFT ``(keypoints, descriptors)``;
      * a FLANN matcher plus the SURF descriptors of the template image.
    """

    # Fewer than this many ratio-test survivors means the screenshot does
    # not contain the lottery circle at all.
    MIN_MATCH_COUNT = 30

    def __init__(self):
        # Non-free detectors; raises if opencv-contrib lacks xfeatures2d.
        self.sift = cv2.xfeatures2d.SIFT_create()
        self.surf = cv2.xfeatures2d.SURF_create()
        # Precompute SIFT features for every known prize-time image.
        # todo: pickle it
        self.resDict = {}
        timesDir = os.path.join(dataPath, "times")
        for fn in os.listdir(timesDir):
            pfn = os.path.join(timesDir, fn)
            # splitext (not split(".")) so labels containing dots survive.
            nt = os.path.splitext(fn)[0]
            img = cv2.imread(pfn, 0)  # 0 = load grayscale
            self.resDict[nt] = self.sift.detectAndCompute(img, None)
        # FLANN KD-tree matcher; parameters from the OpenCV tutorial.
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)
        # SURF features of the template (the big lottery circle).
        img = cv2.imread(os.path.join(dataPath, "tem2x.png"), 0)  # todo: from structed data
        self.kpTemplate, self.desTemplate = self.surf.detectAndCompute(img, None)

    def fuck(self, fn):
        """Locate the lottery circle in the image at *fn* (path or URL).

        Returns the best-matching prize-time label, or ``None`` when the
        template is not found in the screenshot.
        """
        img2d = io.imread(fn, 0)
        if img2d is None:
            # NOTE(review): skimage.io.imread normally raises instead of
            # returning None; kept as a defensive check.
            raise Exception("bad file %s" % fn)
        # Halve the image until its longest side is roughly <= 1024 px.
        sca = max(img2d.shape) // 1024
        while sca > 1:
            sca = sca >> 1
            img2d = cv2.pyrDown(img2d)
        kp, des = self.surf.detectAndCompute(img2d, None)
        matches = self.flann.knnMatch(self.desTemplate, des, k=2)
        # Lowe's ratio test, from the official OpenCV document.
        good = [m for m, n in matches if m.distance < 0.7 * n.distance]
        print("found", len(good))
        # >= so that exactly MIN_MATCH_COUNT matches counts as found (the
        # original used >, off by one against "fewer than 30 => not found").
        if len(good) >= self.MIN_MATCH_COUNT:
            src_pts = np.float32([self.kpTemplate[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            homography = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC)[0]
            # BUGFIX: warpPerspective's dsize is (width, height), i.e.
            # (shape[1], shape[0]); the original swapped them, which
            # mis-sized the warp for non-square screenshots.
            imgText = cv2.warpPerspective(
                img2d, homography, (img2d.shape[1], img2d.shape[0]))
            imgText = imgText[0:75, 65:101]  # todo: from structed data
            return self.findBestmatch(imgText)
        # Template not found in the screenshot.
        return None

    def findBestmatch(self, img):
        """Return the prize-time label whose SIFT features best match *img*."""
        _, des = self.sift.detectAndCompute(img, None)
        maxMatch = "none"
        maxMatchNum = 0
        for k in self.resDict.keys():
            matches = self.flann.knnMatch(des, self.resDict[k][1], k=2)
            # Count ratio-test survivors for this candidate.
            mt = sum(1 for m, n in matches if m.distance < 0.7 * n.distance)
            if mt >= maxMatchNum:
                maxMatch = k
                maxMatchNum = mt
        return maxMatch
if __name__ == "__main__":
    # Usage: python <thisfile> <screenshot path or URL>
    if len(sys.argv) < 2:
        # Fail with a usage message instead of an IndexError.
        print("usage: %s <image path or url>" % sys.argv[0], file=sys.stderr)
        sys.exit(1)
    detector = AutoBanDectector()
    print(detector.fuck(sys.argv[1]))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment