Face detection for a static image and from a webcam, using OpenCV Haar cascades
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 00:05:14 2017
@author: ossyaritoori
"""
import cv2
import numpy as np

# load the Haar cascade classifier for frontal faces (path is installation-specific)
faceCascade = cv2.CascadeClassifier('/home/yoshi/anaconda3/pkgs/opencv3-3.2.0-np111py35_0/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
img = cv2.imread('lena.png', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face = faceCascade.detectMultiScale(gray, 1.1, 3)
# draw a rectangle around each detected face
if len(face) > 0:
    for rect in face:
        cv2.rectangle(img, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (0, 0, 255), thickness=2)
else:
    print("no face")
cv2.imshow('detected',img)
cv2.waitKey(0)
cv2.imwrite('detected.png', img)
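# --- Not part of the original gist: a minimal sketch of loading the same cascade
# --- without the hard-coded Anaconda path. It assumes an OpenCV build that ships
# --- the cv2.data module (the opencv-python wheels do); empty() catches a bad path.
import os
import cv2

cascade_file = os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_alt.xml')
faceCascade = cv2.CascadeClassifier(cascade_file)
if faceCascade.empty():
    raise IOError('could not load cascade: ' + cascade_file)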
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 00:39:48 2017
@author: ossyaritoori
"""
import cv2
# load the Haar cascade classifier (path is installation-specific)
cascade = cv2.CascadeClassifier('/home/yoshi/anaconda3/pkgs/opencv3-3.2.0-np111py35_0/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
# Capture from camera
cap = cv2.VideoCapture(0)
color = (255, 255, 255) # white
while True:
    # grab a frame from the video stream
    ret, frame = cap.read()
    # face detection (the BGR frame is passed directly here)
    facerect = cascade.detectMultiScale(frame, scaleFactor=1.2, minNeighbors=2, minSize=(10, 10))
    for rect in facerect:
        # draw a rectangle around the detected face
        cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), color, thickness=2)
    # show the annotated frame
    cv2.imshow("Show FRAME Image", frame)
    # quit when 'q' is pressed
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
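# --- Not part of the original gist: unlike the static-image script above, this loop
# --- feeds the BGR frame straight to detectMultiScale. A hypothetical helper that
# --- converts to grayscale first, mirroring the other scripts, could look like this:
import cv2

def detect_faces_gray(cascade, frame):
    # convert to grayscale, then run the same Haar-cascade detection
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2, minSize=(10, 10))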
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 01:18:52 2017
@author: yoshi
"""
# Program that finds faces in the webcam feed, marks them with a white border, and saves them
import cv2
import threading
from datetime import datetime
class FaceThread(threading.Thread):
    def __init__(self, frame):
        super(FaceThread, self).__init__()
        self._cascade_path = '/home/yoshi/anaconda3/pkgs/opencv3-3.2.0-np111py35_0/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml'
        self._frame = frame

    def run(self):
        # convert to grayscale
        self._frame_gray = cv2.cvtColor(self._frame, cv2.COLOR_BGR2GRAY)
        # load the cascade classifier
        self._cascade = cv2.CascadeClassifier(self._cascade_path)
        # run object detection (face detection) -> each face rectangle ends up in _facerect
        self._facerect = self._cascade.detectMultiScale(self._frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(10, 10))
        if len(self._facerect) > 0:
            print('Detected Face!')
            self._color = (255, 255, 255)  # white
            for self._rect in self._facerect:
                ## draw a rectangle around the detected face (left disabled here)
                #cv2.rectangle(self._frame, tuple(self._rect[0:2]), tuple(self._rect[0:2] + self._rect[2:4]), self._color, thickness=2)
                # timestamp for the output file name (faces found in the same second overwrite each other)
                self._now = datetime.now().strftime('%Y%m%d%H%M%S')
                self._image_path = 'faces/' + self._now + '.jpg'
                # crop the detected face region
                self.x = self._rect[0]
                self.y = self._rect[1]
                self.width = self._rect[2]
                self.height = self._rect[3]
                self.dst = self._frame[self.y:self.y + self.height, self.x:self.x + self.width]
                # save the cropped face
                cv2.imwrite(self._image_path, self.dst)
# main
# start capturing from the camera
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
while cap.isOpened():
    ret, frame = cap.read()
    # show the current frame
    cv2.imshow('camera capture', frame)
    # start a detection thread only when no FaceThread is already running
    if threading.activeCount() == 1:
        th = FaceThread(frame)
        th.start()
    # wait 10 ms for a key press
    k = cv2.waitKey(10)
    # quit when Esc or 'q' is pressed
    if k == 27:
        break
    if k == ord('q'):
        break

# release the capture
cap.release()
cv2.destroyAllWindows()
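# --- Not part of the original gist: the FaceThread above writes crops into a 'faces/'
# --- directory that the script never creates, and cv2.imwrite simply returns False
# --- when the target directory is missing. A minimal sketch of creating it up front
# --- (run once before starting the capture loop):
import os

os.makedirs('faces', exist_ok=True)  # exist_ok needs Python 3.2+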