'''
Source:
http://dlib.net/face_landmark_detection.py.html

Dependencies (computer vision):
    opencv
        pip3 install opencv-python
    dlib
        brew install cmake dlib
        pip3 install dlib  # this takes a while
    camera capture via imutils VideoStream

TODO:
    On a Raspberry Pi, consider mobilenet/ssd + opencv.
    How to stream the data to Scratch:
        run this script as a separate process, support start/stop,
        and keep returning data by printing continuously
        (see the adapter sketch after this docstring).
    The adapter uses sys.path to point at a python3 AI directory (codelab_ai).

Note: tilt the camera slightly upward, toward the face.
'''
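# ----------------------------------------------------------------------
# A minimal sketch for the "stream to Scratch" TODO above: an adapter
# that launches this script as a separate process and reads the JSON
# lines it prints (see the landmark loop below). This is an assumption
# about the intended adapter, not part of the original gist.
#
# import json
# import subprocess
#
# proc = subprocess.Popen(
#     ["python3", "video_facial_landmarks.py",
#      "--shape-predictor", "shape_predictor_68_face_landmarks.dat"],
#     stdout=subprocess.PIPE, text=True)
# for line in proc.stdout:     # one JSON array of (x, y) points per frame
#     landmarks = json.loads(line)
#     # ... forward `landmarks` to Scratch here ...
# proc.terminate()             # the "stop" half of start/stop
# ----------------------------------------------------------------------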
# USAGE
# python3 video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat
# python3 video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat --picamera 1
# import the necessary packages
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
    help="path to facial landmark predictor")
# path to the model: dlib's pre-trained facial landmark detector,
# a file in the same directory as this script
ap.add_argument("-r", "--picamera", type=int, default=-1,
    help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] camera sensor warming up...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream, resize it to
    # have a maximum width of 480 pixels, and convert it to
    # grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=480)  # match Scratch's stage size, 480x360
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)
    key = cv2.waitKey(1) & 0xFF

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)  # a dlib full_object_detection
        shape = face_utils.shape_to_np(shape)  # shape.tolist() could be sent as JSON
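        # Sketch for the JSON idea in the comment above (assumption: a
        # Scratch-side adapter consumes these lines from stdout):
        # import json
        # print(json.dumps(shape.tolist()), flush=True)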
if key == ord("i"):
from IPython import embed;embed()
# 找到眼镜所在的位置,索引
# (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
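        # A minimal sketch of using those indexes (assumption: only the
        # eye centre is wanted). FACIAL_LANDMARKS_IDXS maps a region name
        # to a (start, end) slice into the 68-point array:
        # (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
        # right_eye = shape[rStart:rEnd]      # six (x, y) points
        # (cx, cy) = right_eye.mean(axis=0)   # centre of the right eye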
        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        for (x, y) in shape:
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)  # small red dot per landmark; keep publishing JSON (see TODO)

    # show the frame
    cv2.imshow("Frame", frame)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()