# Install OpenCV system dependencies and the Python bindings.
sudo apt install -y libopencv-dev
sudo apt install -y libatlas-base-dev   # BLAS/LAPACK backend used by OpenCV/numpy
sudo apt install -y libjasper-dev       # JPEG-2000 codec (only on older distros)
sudo apt install -y libqt4-dev          # was "qt4-dev", which is not a real package name
# Use the same interpreter (pip3) for both packages so they land in one
# site-packages; opencv-contrib-python provides the cv2.face module used below.
sudo pip3 install opencv-python
sudo pip3 install opencv-contrib-python
Created: June 1, 2018, 02:32
-
-
Save jinyu121/8e4a0b8faa49e462a94c2982d530572b to your computer and use it in GitHub Desktop.
OpenCV Face Recognition Demo
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Capture face samples from the webcam for one new user.

Prompts for a user name, appends it to conf/names.txt, then grabs up to
30 grayscale face crops from the default camera and writes them to
dataset/User.<id>.<n>.jpg for the training script to consume.
"""
import cv2

cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # set video width
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # set video height
face_detector = cv2.CascadeClassifier('conf/haarcascade_frontalface_default.xml')

# Assign the next free numeric face id: ids are 1-based positions in names.txt.
with open('conf/names.txt') as f:  # context manager: don't leak the handle
    names_count = [x.strip() for x in f.readlines() if x.strip() != ""]
face_id = len(names_count) + 1

# Keep prompting until we get a non-empty name that is not already registered.
while True:
    name = input('User name: ').strip()
    if name and name not in names_count:
        with open('conf/names.txt', "a") as f:
            f.write("\n" + name)
        break

print("Initializing face capture. Look at the camera and wait ...")
count = 0  # number of face samples saved so far
while True:
    ret, img = cam.read()
    if not ret:
        # Camera read failed; stop instead of crashing inside cvtColor.
        break
    # img = cv2.flip(img, -1)  # flip video image vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        count += 1
        # Save the grayscale face crop into the dataset folder.
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg",
                    gray[y:y + h, x:x + w])
    cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff  # press 'ESC' to exit the video
    if k == 27:
        break
    elif count >= 30:  # take 30 face samples and stop
        break

# Do a bit of cleanup
print("Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Train an LBPH face recognizer from the images captured in dataset/."""
import cv2
import numpy as np
from PIL import Image
import os

# Path for the face image database written by the capture script.
path = 'dataset'

recognizer = cv2.face.LBPHFaceRecognizer_create()
# Same cascade the capture script used, so detections line up with the crops.
detector = cv2.CascadeClassifier("conf/haarcascade_frontalface_default.xml")
def getImagesAndLabels(path):
    """Load every image under *path* and extract (face crop, label) pairs.

    The label is parsed from the filename, which the capture script writes
    as ``User.<face_id>.<count>.jpg`` — the second dot-separated field.

    Returns a tuple ``(faceSamples, ids)`` of parallel lists: grayscale
    numpy face crops and their integer labels.
    """
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        # Context manager closes the file handle; 'L' converts to grayscale.
        with Image.open(imagePath) as PIL_img:
            img_numpy = np.array(PIL_img.convert('L'), 'uint8')
        # 'id' renamed to avoid shadowing the builtin.
        face_id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y + h, x:x + w])
            ids.append(face_id)
    return faceSamples, ids
print("[INFO] Training faces. It will take a few seconds. Wait ...")
faces, ids = getImagesAndLabels(path)
if not faces:
    # Fail with a clear message instead of an opaque OpenCV error.
    raise SystemExit("[ERROR] No faces found in '{0}'. Run the capture script first.".format(path))
recognizer.train(faces, np.array(ids))

# Save the model into conf/model.yml
recognizer.write('conf/model.yml')  # recognizer.save() worked on Mac, but not on Pi

# Print the number of distinct people trained and end the program.
print("[INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Recognize faces in real time using the model trained by the trainer script."""
import cv2

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('conf/model.yml')  # model produced by the training script
cascadePath = "conf/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX

# Names indexed by predicted label. Training ids are 1-based positions in
# names.txt, so pad index 0 with a placeholder.
with open('conf/names.txt') as f:  # context manager: don't leak the handle
    names = ['Unknown'] + [x.strip() for x in f.readlines() if x.strip() != ""]

# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # set video width
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # set video height

# Define min window size to be recognized as a face (10% of frame size).
minW = 0.1 * cam.get(cv2.CAP_PROP_FRAME_WIDTH)
minH = 0.1 * cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
while True:
    ret, img = cam.read()
    if not ret:
        # Camera read failed; stop instead of crashing inside cvtColor.
        break
    # img = cv2.flip(img, -1)  # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # LBPH confidence is a distance: 0 is a perfect match, larger is worse.
        label, confidence = recognizer.predict(gray[y:y + h, x:x + w])
        # Guard the index too: names.txt may be out of sync with the model.
        if confidence < 100 and 0 <= label < len(names):
            shown_name = names[label]
        else:
            shown_name = "unknown"
        # Clamp at 0 so a distance above 100 is not shown as a negative percent.
        shown_conf = " {0}%".format(max(0, round(100 - confidence)))
        cv2.putText(img, str(shown_name), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(shown_conf), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
    cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff  # press 'ESC' to exit the video
    if k == 27:
        break

# Do a bit of cleanup
print("[INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.