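"""Flask service that OCRs fields from a driver's licence photo.

The uploaded image is aligned to a reference template with ORB feature matching
and a homography, each labelled region of interest is cropped, and the crops are
sent to the Google Cloud Vision API for text detection.
"""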
from flask import Flask, request, jsonify
import io
import os
import tempfile

import cv2
import numpy as np

# Imports the Google Cloud client library
from google.cloud import vision
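# Third-party packages used: flask, opencv-python, numpy, google-cloud-vision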
app = Flask(__name__)

# Set the path of the downloaded Google service account key
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = \
    'C:/Users/PunsisiK.LOITL-SE03/Downloads/qwiklabs-gcp-02-de13f4ddd638-843eec04eacc.json'

# Instantiates a Vision API client
client = vision.ImageAnnotatorClient()
per = 25              # percentage of the best ORB matches to keep
pixelThreshold = 500  # not used in this script
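# Regions of interest on the aligned template image:
# [(top-left x, y), (bottom-right x, y), field type, field name]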
roi = [[(300, 112), (540, 156), 'text', 'LID'],
       [(676, 112), (988, 180), 'text', 'NIC'],
       [(274, 154), (778, 236), 'text', 'Name'],
       [(278, 228), (642, 294), 'text', 'Address'],
       [(280, 292), (532, 336), 'text', 'DOB'],
       [(276, 334), (540, 384), 'text', 'D1'],
       [(274, 376), (542, 426), 'text', 'D2'],
       [(276, 420), (550, 478), 'text', 'Blood Group']]


# Accept POST requests with a JSON body: {"img1": "<file name inside the DL folder>"}
@app.route('/OCRDL', methods=['POST'])
def ocrDL():
    data = request.get_json()
    img1 = data['img1']
    result = readData(img1)
    return jsonify(result)
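

# readData() aligns the uploaded photo to the reference licence template using ORB
# feature matching plus a RANSAC homography, then crops each roi box from the
# aligned image and sends the crop to Cloud Vision for text detection.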
def readData(img1):
    # Reference (template) image of the licence layout
    imgQ = cv2.imread('QueryImg/Drivers License.png')
    h, w, c = imgQ.shape

    # ORB keypoints/descriptors for the template
    orb = cv2.ORB_create(1000)
    kp1, des1 = orb.detectAndCompute(imgQ, None)

    # Load the uploaded image from the local DL folder
    path = 'DL'
    imgID = img1
    img = cv2.imread(path + "/" + imgID)
    kp2, des2 = orb.detectAndCompute(img, None)

    # Match descriptors and keep the best `per` percent of matches
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = sorted(bf.match(des2, des1), key=lambda x: x.distance)
    good = matches[:int(len(matches) * (per / 100))]

    # Estimate a homography from the upload to the template and warp (align) the upload
    srcPoints = np.float32([kp2[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dstPoints = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, _ = cv2.findHomography(srcPoints, dstPoints, cv2.RANSAC, 5.0)
    imgScan = cv2.warpPerspective(img, M, (w, h))
    # cv2.imshow(imgID, imgScan)

    imgShow = imgScan.copy()
    imgMask = np.zeros_like(imgShow)
    myData = []
    print(f'################## Extracting Data from Form {imgID} ##################')
    for x, r in enumerate(roi):
        # Highlight the current field on the preview image
        cv2.rectangle(imgMask, (r[0][0], r[0][1]), (r[1][0], r[1][1]), (0, 255, 0), cv2.FILLED)
        imgShow = cv2.addWeighted(imgShow, 0.99, imgMask, 0.1, 0)

        # Crop the field region from the aligned image
        imgCrop = imgScan[r[0][1]:r[1][1], r[0][0]:r[1][0]]

        if r[2] == 'text':
            # Write the crop to a temporary file and send it to Cloud Vision OCR
            with tempfile.TemporaryDirectory() as tmpdirname:
                num = x + 1
                totals_file = os.path.join(tmpdirname, str(num) + '.jpg')
                cv2.imwrite(totals_file, imgCrop)

                # Loads the crop into memory
                with io.open(totals_file, 'rb') as image_file:
                    content = image_file.read()

                image = vision.Image(content=content)
                response = client.text_detection(image=image)
                texts = response.text_annotations

                string = []
                for text in texts:
                    string.append(text.description)

                # The first annotation holds the full detected text for the crop
                if len(string) != 0:
                    print(string[0])
                    myData.append(str(string[0]).replace("\n", " "))

                os.remove(totals_file)

    print("Data>>>>>>", myData)
    return myData


if __name__ == '__main__':
    app.run()
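
# Example client call (a minimal sketch; the host/port and the file name 'dl1.jpg'
# are assumptions, not part of the gist). Note that readData() reads the image from
# the local 'DL' folder, so the named file must already exist there:
#
#   import requests
#   resp = requests.post('http://127.0.0.1:5000/OCRDL', json={'img1': 'dl1.jpg'})
#   print(resp.json())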