Ramkrishna Acharya q-viper

@q-viper
q-viper / preprocess.py
Last active November 16, 2019 16:55
An OCR Preprocessing Method.
import cv2
import numpy as np

def preprocess(bgr_img):  # despite the name, this expects a grayscale image
    img = bgr_img.copy()  # [:] on a NumPy array is a view, not a copy
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    # Otsu threshold, inverted: text becomes white, background black
    ret, th_img = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    rows, cols = th_img.shape
    # sample the first five diagonal pixels to guess the background color
    bg_test = np.array([th_img[i][i] for i in range(5)])
    if not bg_test.all():  # original wrote `bg_test.all() == 0`; same test, idiomatic form
        text_color = 255
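The preview cuts off mid-function, but recognition.py below unpacks four return values from preprocess, so a call would look roughly like this (sample.jpg is a placeholder path):

import cv2

# Hypothetical usage; per recognition.py below, preprocess returns
# (segments, template, th_img, text_color).
gray = cv2.imread('sample.jpg', 0)  # 0 -> load as grayscale
segments, template, th_img, text_color = preprocess(gray)
print('text color:', text_color)    # 255 when the background sample is black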
@q-viper
q-viper / preprocess.py
Created November 16, 2019 17:01
Finding ROI
def borders(here_img, thresh):
    size = here_img.shape
    # row offset to start checking, scaled from a 600-px-tall reference image
    check = int(115 * size[0] / 600)
    image = here_img[:]
    top, bottom = 0, size[0] - 1
    # plt.imshow(image)
    # plt.show()
    shape = size
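The preview stops before the scanning loop. A minimal sketch of one way such a border scan can work, assuming borders trims leading and trailing rows whose ink count stays below thresh (my reading of the truncated code, not the author's confirmed logic; find_row_borders is a hypothetical name):

import numpy as np

def find_row_borders(binary_img, thresh):
    # indices of rows whose total ink exceeds the threshold
    inked = np.where(binary_img.sum(axis=1) > thresh)[0]
    if inked.size == 0:
        return 0, binary_img.shape[0] - 1  # no text found: keep the full image
    return inked[0], inked[-1]             # first and last rows containing text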
@q-viper
q-viper / preprocess.py
Created November 16, 2019 17:02
Segmentation
def segmentation(bordered, thresh):
    try:
        shape = bordered.shape
        # skip a top margin scaled from a 320-px-tall reference image
        check = int(50 * shape[0] / 320)
        image = bordered[:]
        image = image[check:].T  # transpose so columns can be scanned as rows
        shape = image.shape
        # plt.imshow(image)
        # plt.show()
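The transpose hints that columns are scanned the same way borders scans rows. A sketch of column-projection segmentation under that assumption (split_at_blank_columns is my name, not the gist's):

import numpy as np

def split_at_blank_columns(binary_img, thresh):
    col_ink = binary_img.sum(axis=0)        # per-column ink totals
    segments, in_char, start = [], False, 0
    for x, ink in enumerate(col_ink):
        if ink > thresh and not in_char:    # entering a character
            in_char, start = True, x
        elif ink <= thresh and in_char:     # leaving a character
            in_char = False
            segments.append(binary_img[:, start:x])
    if in_char:                             # character runs to the right edge
        segments.append(binary_img[:, start:])
    return segments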
@q-viper
q-viper / preprocess.py
Created November 16, 2019 17:06
Localize Segments.
def localize(main_image, gray_img, localized, bc, show):
    # use the segment as a grayscale template
    template = localized
    # print(template.shape)
    width, height = template.shape[::-1]  # shape is (rows, cols); reversed gives (w, h)
    # match the template against the page
    match = cv2.matchTemplate(gray_img, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    position = np.where(match >= threshold)  # locations scoring above the threshold
    for point in zip(*position[::-1]):  # draw a rectangle around each match
        cv2.rectangle(main_image, point, (point[0] + width, point[1] + height), (0, 0, 255), 1)
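Assuming localize draws its boxes on main_image in place, a call might look like this (page.jpg and segment are placeholders):

import cv2

page = cv2.imread('page.jpg')                     # color page for drawing
gray = cv2.cvtColor(page, cv2.COLOR_BGR2GRAY)     # matchTemplate needs grayscale
localize(page, gray, segment, bc=255, show=True)  # segment: one cropped character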
@q-viper
q-viper / preprocess.py
Created November 16, 2019 17:07
Border Drawing
def detect_text(main_image, gray_img, localized, bc):
    cimg = cv2.resize(localized, (30, 30))
    bordersize = 1
    # pad 1 px on every side: 30x30 -> 32x32, border color opposite the text color
    nimg = cv2.copyMakeBorder(cimg, top=bordersize, bottom=bordersize,
                              left=bordersize, right=bordersize,
                              borderType=cv2.BORDER_CONSTANT, value=[255 - bc, 0, 0])
    return main_image, nimg
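A quick sanity check of the output size: a 30x30 resize plus a 1 px border on each side yields the 32x32 patch a small CNN classifier typically expects:

import numpy as np

patch = np.zeros((64, 48), dtype=np.uint8)          # dummy segment
_, padded = detect_text(None, None, patch, bc=255)  # main_image passes straight through
print(padded.shape)                                 # (32, 32)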
@q-viper
q-viper / recognition.py
Created November 16, 2019 17:09
Recognition of the segments.
from preprocess import preprocess, detect_text, localize
from predictor import prediction
import numpy as np
import matplotlib.pyplot as plt
import cv2

def recognition(gray_image, show):
    segments, template, th_img, text_color = preprocess(gray_image)
    labels = []
    accuracy = []
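The per-segment loop is truncated; a sketch of its likely shape, assuming prediction returns a (label, confidence) pair (prediction is stubbed here so the snippet runs standalone; classify_segments is a hypothetical name):

def prediction(img):                  # stand-in for predictor.prediction
    return '0', 1.0

def classify_segments(segments):
    labels, accuracy = [], []
    for seg in segments:
        label, acc = prediction(seg)  # classify one character patch
        labels.append(label)
        accuracy.append(acc)
    return labels, accuracy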
@q-viper
q-viper / predictor.py
Created November 16, 2019 17:11
Using Keras Model for Prediction.
import numpy as np
from keras.models import model_from_json, load_model

def prediction(img):
    # load the architecture from JSON and create the model
    json_file = open('cnn2/cnn2.json', 'r')  # forward slash works on Windows too
    loaded_model_json = json_file.read()
    json_file.close()
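The standard model_from_json flow continues by attaching weights; a sketch, where the cnn2/cnn2.h5 weights filename is an assumption (only the .json name appears above):

from keras.models import model_from_json

def load_cnn(json_path='cnn2/cnn2.json', weights_path='cnn2/cnn2.h5'):
    with open(json_path, 'r') as f:
        model = model_from_json(f.read())  # rebuild the architecture from JSON
    model.load_weights(weights_path)       # then attach the trained weights
    return model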
@q-viper
q-viper / camera.py
Created November 16, 2019 17:14
Using the camera for real-time work.
import cv2
from recognition import recognition
import numpy as np
import time
import matplotlib.pyplot as plt

def camera(flag):
    # choice = print("Click spacebar for photo and anything else for video.\n")
    orig = 1
    cap = cv2.VideoCapture(0)  # open the default camera
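A minimal capture-loop sketch of what the truncated camera() plausibly does next (the q-to-quit key is my choice, not the gist's):

import cv2

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:                             # camera unavailable or stream ended
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()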
@q-viper
q-viper / main.py
Created November 16, 2019 17:16
Combining it all.
from recognition import recognition
import cv2
import matplotlib.pyplot as plt
from video_test import camera  # note: the gist above saves this function in camera.py
import time

def show(img, figsize=(10, 10)):
    figure = plt.figure(figsize=figsize)
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.show()

try:
    test = input('Please enter the image directory with name.\n')
    test = cv2.imread(test, 0)  # read as grayscale
    plt.imshow(cv2.cvtColor(test, cv2.COLOR_GRAY2RGB))

    cap = cv2.VideoCapture(0)  # run camera with device
    imgs = []  # list to hold frames
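The try: is cut off before its handler. Given the camera import, a plausible overall flow is image-first with a camera fallback; a sketch of that reading, not the author's confirmed code:

# Sketch: recognize a file if the path loads, otherwise fall back to the camera.
try:
    path = input('Please enter the image directory with name.\n')
    img = cv2.imread(path, 0)
    recognition(img, show)
except Exception:
    camera(1)  # the argument is a guess; the preview never shows how flag is used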