In a Jupyter notebook, analyze and display the face shape and its size from camera images.
# https://stackoverflow.com/questions/37210655/opencv-detect-face-landmarks-ear-chin-ear-line
# See prCIc.png for a sample of the output.
#FACE_POINTS = list(range(17, 68))
#MOUTH_POINTS = list(range(48, 61))
#RIGHT_BROW_POINTS = list(range(17, 22))
#LEFT_BROW_POINTS = list(range(22, 27))
#RIGHT_EYE_POINTS = list(range(36, 42))
#LEFT_EYE_POINTS = list(range(42, 48))
#NOSE_POINTS = list(range(27, 35))
#JAW_POINTS = list(range(0, 17))
#CHIN_POINTS=list(range(6,11))
import cv2
import numpy as np
from matplotlib import pyplot as plt
import dlib
# Face landmark detection.
# https://www.pyimagesearch.com/?s=face+landmark&submit=Search
# http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
#PREDICTOR_PATH = "shape_predictor_5_face_landmarks.dat"
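# The predictor file is not bundled with this gist. A minimal sketch of fetching
# it from the dlib.net URL above when it is missing (assumes network access and
# roughly 100 MB of free disk space for the decompressed model):
import os, bz2, urllib.request
if not os.path.exists(PREDICTOR_PATH):
    with urllib.request.urlopen("http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2") as response:
        # Decompress the .bz2 archive straight into the .dat file dlib expects.
        with open(PREDICTOR_PATH, "wb") as f:
            f.write(bz2.decompress(response.read()))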
predictor = dlib.shape_predictor(PREDICTOR_PATH)
# The frontal-face Haar cascade ships with opencv-python; if this local copy is
# missing, cv2.data.haarcascades + 'haarcascade_frontalface_default.xml' points at the bundled one.
cascade_path = 'haarcascade_frontalface_default.xml'
cascade = cv2.CascadeClassifier(cascade_path)
# Alternative: the dlib face detector instead of the Haar cascade.
# Better results, but slower. Requires detector = dlib.get_frontal_face_detector().
#def get_landmarks(im):
#    rects = detector(im, 1)
#    if len(rects) == 0:
#        return None, None
#    rect = rects[0]
#    print(type(rect.width()))
#    fwd = int(rect.width())
#    return np.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()]), fwd
def get_landmarks(im):
    # Detect a face with the Haar cascade, then fit the 68-point predictor to it.
    rects = cascade.detectMultiScale(im, 1.3, 5)
    if len(rects) == 0:
        return []
    x, y, w, h = rects[0]
    # Cast to int: some dlib builds reject numpy integer types here.
    rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    object_detection = predictor(im, rect)
    # print(object_detection.parts())
    return object_detection.parts()
    # return np.matrix([[p.x, p.y] for p in object_detection.parts()])
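# A quick, hedged sanity check for get_landmarks on a still image instead of the
# webcam loop below ("face.jpg" is a hypothetical local file, not part of this gist):
#
#   test_im = cv2.imread("face.jpg")
#   print(len(get_landmarks(test_im)))  # 68 if a face was found, 0 otherwise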
def get_length(points):
    # Polyline length: sum of the distances between consecutive points.
    length = 0
    point = points[0]
    for p in points:
        # print(point, p)
        diff = p - point
        dist = np.sqrt(pow(diff.x, 2) + pow(diff.y, 2))
        length += dist
        point = p
    return length
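# Cross-check for get_length: the same polyline length computed with numpy
# (a sketch; it converts the points to an (N, 2) float array and sums the
# norms of consecutive differences).
def get_length_np(points):
    pts = np.array([[p.x, p.y] for p in points], dtype=float)
    return np.linalg.norm(np.diff(pts, axis=0), axis=1).sum()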
# Draws the detected landmark points (and their indices) on the image.
def annotate_landmarks(im, parts):
    # im = im.copy()
    landmarks = np.matrix([[p.x, p.y] for p in parts])
    for idx, point in enumerate(landmarks):
        pos = (point[0, 0], point[0, 1])
        cv2.putText(im, str(idx), pos,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.2,
                    color=(0, 0, 255))
        cv2.circle(im, pos, 3, color=(0, 255, 255))
    return im
# Draws text, such as the length of a facial line segment, on the image.
def annotate_text(im, point, text, color=(255, 0, 0)):
    # im = im.copy()
    cv2.putText(im, text, point,
                fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                fontScale=0.6,
                color=color)
    return im
# Draws a polyline through the given facial points on the image.
def annotate_line(im, parts, lineColor=(255, 0, 0)):
    # im = im.copy()
    landmarks = np.matrix([[p.x, p.y] for p in parts])
    startPoint = landmarks[0]
    for idx, point in enumerate(landmarks):
        p1 = (startPoint[0, 0], startPoint[0, 1])
        p2 = (point[0, 0], point[0, 1])
        cv2.line(im, p1, p2, color=lineColor, thickness=3)
        startPoint = point
    return im
cap = cv2.VideoCapture(0)
while True:
    ret_value, img = cap.read()
    if not ret_value:
        break
    img = cv2.flip(img, 1)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    parts = get_landmarks(img)
    if len(parts) > 0:
        # Landmarks 0-16 trace the jaw line and 27-35 the nose
        # (see the index map at the top of this file).
        left_cheek = parts[2:8]
        right_cheek = parts[9:15]
        left_cheek_line = [parts[2], parts[31]]
        right_cheek_line = [parts[14], parts[35]]
        # print(left_cheek, right_cheek)
        # print(left_cheek_line, right_cheek_line)
        print("left cheek length {:.0f}".format(get_length(left_cheek)))
        print("left cheek-nose distance {:.0f}".format(get_length(left_cheek_line)))
        print("right cheek length {:.0f}".format(get_length(right_cheek)))
        print("right cheek-nose distance {:.0f}".format(get_length(right_cheek_line)))
        img = annotate_landmarks(img, parts)
        img = annotate_line(img, left_cheek, (0, 250, 0))
        img = annotate_line(img, left_cheek_line)
        img = annotate_line(img, right_cheek, (0, 250, 0))
        img = annotate_line(img, right_cheek_line)
        img = annotate_text(img, (parts[2].x - 80, parts[2].y + 40), "{:.0f}".format(get_length(left_cheek)), (0, 250, 0))
        img = annotate_text(img, (parts[31].x - 20, parts[31].y - 20), "{:.0f}".format(get_length(left_cheek_line)))
        img = annotate_text(img, (parts[14].x + 80, parts[14].y + 40), "{:.0f}".format(get_length(right_cheek)), (0, 250, 0))
        img = annotate_text(img, (parts[35].x + 20, parts[35].y - 20), "{:.0f}".format(get_length(right_cheek_line)))
    # plt.figure(figsize=(10,10))
    cv2.imshow('face', img)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
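# cv2.imshow can be unreliable inside Jupyter; a hedged alternative is to show
# the last annotated frame with the already-imported matplotlib (note the
# BGR-to-RGB conversion; this is a sketch, not part of the original loop):
#
#   plt.figure(figsize=(10, 10))
#   plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
#   plt.axis('off')
#   plt.show()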