@UnaNancyOwen
Last active January 19, 2024 07:00
OpenCV ObjDetect Module Face Recognition (SFace) Sample

    1. Generate Aligned Faces
    python generate_aligned_faces.py image.jpg

    • Input: source image (image.jpg)
    • Outputs: aligned face images (face001.jpg, face002.jpg, ...)

    2. Generate Feature Dictionary
    python generate_feature_dictionary.py face001.jpg
    python generate_feature_dictionary.py face002.jpg

    • Inputs: aligned face images (face001.jpg, face002.jpg)
    • Outputs: feature dictionaries (face001.npy, face002.npy)

    3. Face Recognizer
    python face_recognizer.py

    • Input: source image (image.jpg) or camera
    • Output: annotated image (NOTE: the label is the file name of the dictionary; see the sketch after this list)
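The recognition label comes directly from the dictionary file name. The following is a minimal sketch (not part of the original gist) of how face_recognizer.py below derives its user IDs from the saved .npy files; it assumes the files produced in step 2 are in the current directory.

import os
import glob
import numpy as np

# Each .npy file saved in step 2 becomes one dictionary entry;
# the label is the file name without its extension (e.g. "face001").
dictionary = []
for file in glob.glob("*.npy"):
    user_id = os.path.splitext(os.path.basename(file))[0]
    dictionary.append((user_id, np.load(file)))

print([user_id for user_id, _ in dictionary])  # labels drawn on the output image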

Download

yunet.onnx
face_recognizer_fast.onnx

Reference

DNN-based Face Detection And Recognition | OpenCV Tutorials
cv::FaceDetectorYN Class Reference | OpenCV Online Documentation
cv::FaceRecognizerSF Class Reference | OpenCV Online Documentation

Update

2023/06/27: The OpenCV DNN-based face detection API and model are updated to YuNet v2 starting with OpenCV 4.8.0. For more detail, please refer to the Pull Request.
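If you are unsure which model your OpenCV build expects, a runtime version check helps. This is a minimal sketch, assuming the yunet.onnx linked in this gist is the pre-4.8.0 (v1) model; the version-string parsing is standard Python.

import cv2

# OpenCV reports its version as a "major.minor.patch" string.
major, minor = (int(v) for v in cv2.__version__.split(".")[:2])
if (major, minor) >= (4, 8):
    print("OpenCV >= 4.8.0: use the YuNet v2 model (see the pull request above)")
else:
    print("OpenCV < 4.8.0: the yunet.onnx linked in this gist should work as-is")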

# face_recognizer.py
import os
import sys
import glob
import numpy as np
import cv2

COSINE_THRESHOLD = 0.363
NORML2_THRESHOLD = 1.128

# Compare a feature against the dictionary and return the matched user and score
def match(recognizer, feature1, dictionary):
    for element in dictionary:
        user_id, feature2 = element
        score = recognizer.match(feature1, feature2, cv2.FaceRecognizerSF_FR_COSINE)
        if score > COSINE_THRESHOLD:
            return True, (user_id, score)
    return False, ("", 0.0)

def main():
    # Open the capture
    directory = os.path.dirname(__file__)
    capture = cv2.VideoCapture(os.path.join(directory, "image.jpg")) # image file
    #capture = cv2.VideoCapture(0) # camera
    if not capture.isOpened():
        exit()

    # Load the features
    dictionary = []
    files = glob.glob(os.path.join(directory, "*.npy"))
    for file in files:
        feature = np.load(file)
        user_id = os.path.splitext(os.path.basename(file))[0]
        dictionary.append((user_id, feature))

    # Load the models
    weights = os.path.join(directory, "yunet.onnx")
    face_detector = cv2.FaceDetectorYN_create(weights, "", (0, 0))
    weights = os.path.join(directory, "face_recognizer_fast.onnx")
    face_recognizer = cv2.FaceRecognizerSF_create(weights, "")

    while True:
        # Capture a frame and read the image
        result, image = capture.read()
        if result is False:
            cv2.waitKey(0)
            break

        # Convert the image to 3 channels if it has a different channel count
        channels = 1 if len(image.shape) == 2 else image.shape[2]
        if channels == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        if channels == 4:
            image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)

        # Set the input size
        height, width, _ = image.shape
        face_detector.setInputSize((width, height))

        # Detect faces
        result, faces = face_detector.detect(image)
        faces = faces if faces is not None else []

        for face in faces:
            # Crop the face and extract its feature
            aligned_face = face_recognizer.alignCrop(image, face)
            feature = face_recognizer.feature(aligned_face)

            # Match against the dictionary
            result, user = match(face_recognizer, feature, dictionary)

            # Draw the bounding box of the face
            box = list(map(int, face[:4]))
            color = (0, 255, 0) if result else (0, 0, 255)
            thickness = 2
            cv2.rectangle(image, box, color, thickness, cv2.LINE_AA)

            # Draw the recognition result
            id, score = user if result else ("unknown", 0.0)
            text = "{0} ({1:.2f})".format(id, score)
            position = (box[0], box[1] - 10)
            font = cv2.FONT_HERSHEY_SIMPLEX
            scale = 0.6
            cv2.putText(image, text, position, font, scale, color, thickness, cv2.LINE_AA)

        # Show the image
        cv2.imshow("face recognition", image)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break

    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
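NORML2_THRESHOLD is defined in face_recognizer.py but not used. For reference, here is a minimal sketch (not part of the original script) of the same matching loop using the L2-norm distance; with cv2.FaceRecognizerSF_FR_NORM_L2 a lower score means a closer match, so the comparison direction is reversed.

import cv2

NORML2_THRESHOLD = 1.128

def match_norml2(recognizer, feature1, dictionary):
    # With the L2-norm metric, smaller distances mean more similar faces,
    # so a match is a score *below* the threshold.
    for user_id, feature2 in dictionary:
        score = recognizer.match(feature1, feature2, cv2.FaceRecognizerSF_FR_NORM_L2)
        if score < NORML2_THRESHOLD:
            return True, (user_id, score)
    return False, ("", 0.0)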
# generate_aligned_faces.py
import os
import argparse
import numpy as np
import cv2

def main():
    # Parse the arguments
    parser = argparse.ArgumentParser("generate aligned face images from an image")
    parser.add_argument("image", help="input image file path (./image.jpg)")
    args = parser.parse_args()

    # Get the image file path from the arguments
    path = args.image
    directory = os.path.dirname(args.image)
    if not directory:
        directory = os.path.dirname(__file__)
        path = os.path.join(directory, args.image)

    # Open the image
    image = cv2.imread(path)
    if image is None:
        exit()

    # Convert the image to 3 channels if it has a different channel count
    channels = 1 if len(image.shape) == 2 else image.shape[2]
    if channels == 1:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    if channels == 4:
        image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)

    # Load the models
    weights = os.path.join(directory, "yunet.onnx")
    face_detector = cv2.FaceDetectorYN_create(weights, "", (0, 0))
    weights = os.path.join(directory, "face_recognizer_fast.onnx")
    face_recognizer = cv2.FaceRecognizerSF_create(weights, "")

    # Set the input size
    height, width, _ = image.shape
    face_detector.setInputSize((width, height))

    # Detect faces
    _, faces = face_detector.detect(image)

    # Crop the detected faces
    aligned_faces = []
    if faces is not None:
        for face in faces:
            aligned_face = face_recognizer.alignCrop(image, face)
            aligned_faces.append(aligned_face)

    # Show and save the aligned faces
    for i, aligned_face in enumerate(aligned_faces):
        cv2.imshow("aligned_face {:03}".format(i + 1), aligned_face)
        cv2.imwrite(os.path.join(directory, "face{:03}.jpg".format(i + 1)), aligned_face)

    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
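For reference, each row returned by FaceDetectorYN.detect() carries 15 values according to the OpenCV documentation: the bounding box (x, y, w, h), five landmark points (right eye, left eye, nose tip, right and left mouth corners), and a confidence score. That is why the scripts here take face[:4] for the box and pass the whole row to alignCrop(). A minimal sketch, assuming yunet.onnx and image.jpg sit next to the script:

import cv2

face_detector = cv2.FaceDetectorYN_create("yunet.onnx", "", (0, 0))
image = cv2.imread("image.jpg")
face_detector.setInputSize((image.shape[1], image.shape[0]))

_, faces = face_detector.detect(image)
if faces is not None:
    x, y, w, h = map(int, faces[0][:4])   # bounding box
    confidence = float(faces[0][14])      # detection score
    print("box:", (x, y, w, h), "confidence:", confidence)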
# generate_feature_dictionary.py
import os
import sys
import argparse
import numpy as np
import cv2

def main():
    # Parse the arguments
    parser = argparse.ArgumentParser("generate face feature dictionary from a face image")
    parser.add_argument("image", help="input face image file path (./face.jpg)")
    args = parser.parse_args()
    print(args.image)

    # Get the image file path from the arguments
    path = args.image
    directory = os.path.dirname(args.image)
    if not directory:
        directory = os.path.dirname(__file__)
        path = os.path.join(directory, args.image)

    # Open the image
    image = cv2.imread(path)
    if image is None:
        exit()

    # Convert the image to 3 channels if it has a different channel count
    channels = 1 if len(image.shape) == 2 else image.shape[2]
    if channels == 1:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    if channels == 4:
        image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)

    # Load the model
    weights = os.path.join(directory, "face_recognizer_fast.onnx")
    face_recognizer = cv2.FaceRecognizerSF_create(weights, "")

    # Extract the feature
    face_feature = face_recognizer.feature(image)
    print(face_feature)
    print(type(face_feature))

    # Save the feature
    basename = os.path.splitext(os.path.basename(args.image))[0]
    dictionary = os.path.join(directory, basename)
    np.save(dictionary, face_feature)

if __name__ == '__main__':
    main()
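To sanity-check the saved dictionaries, two features can be compared directly with FaceRecognizerSF.match(). A minimal sketch, assuming face001.npy, face002.npy, and face_recognizer_fast.onnx are in the current directory; the cosine threshold is the same one used by face_recognizer.py above.

import numpy as np
import cv2

COSINE_THRESHOLD = 0.363

recognizer = cv2.FaceRecognizerSF_create("face_recognizer_fast.onnx", "")
feature1 = np.load("face001.npy")
feature2 = np.load("face002.npy")

# A higher cosine score means more similar faces.
score = recognizer.match(feature1, feature2, cv2.FaceRecognizerSF_FR_COSINE)
print("score: {:.3f} ->".format(score),
      "same person" if score > COSINE_THRESHOLD else "different person")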
@waragai-katsunori

Replacing

return True, (user_id, cos_score)

with

return True, (user_id, score)

made face_recognizer.py work.
