@ibmua
Last active February 22, 2021 14:17
extract-1adrianb-crop-face-alignment
# for use with https://github.com/1adrianb/face-alignment
from functools import partial

import cv2
import numpy as np
import skimage.io

import face_alignment

imread = cv2.imread
_DEFAULT_JPG_QUALITY = 99
imwrite = partial(cv2.imwrite, params=[int(cv2.IMWRITE_JPEG_QUALITY), _DEFAULT_JPG_QUALITY])

fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cuda')
# fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, face_detector='dlib')
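# Assumed alternative (not in the original gist): face_alignment also accepts
# device='cpu', so the same setup should work without a CUDA GPU, just slower.
# fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cpu')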
def cutout(ipath):
    global fa
    if isinstance(ipath, str):
        original255 = skimage.io.imread(ipath).astype(np.ubyte)
    else:
        original255 = ipath.copy()
        ipath = '_'
    # original255 = imread(ipath)
    # print(original255.shape)

    # Drop the alpha channel if the image has one
    if original255.shape[-1] == 4:
        original255 = original255[:, :, :3]
    frame = original255
    print('frame', frame)

    # Run the face alignment tracker on the image
    imagePoints = fa.get_landmarks_from_image(frame)
    print('')
    print('imagePoints', imagePoints)
    print('')

    chipSize = 512
    chipCorners = np.float32([[0, 0],
                              [chipSize, 0],
                              [0, chipSize],
                              [chipSize, chipSize]])
    if imagePoints is not None:
        print('processing points')
        imagePoints = imagePoints[0]

        # Compute the Anchor Landmarks
        # This ensures the eyes and chin will not move within the chip
        rightEyeMean = np.mean(imagePoints[36:42], axis=0)  # landmarks 36-41: right eye
        leftEyeMean  = np.mean(imagePoints[42:48], axis=0)  # landmarks 42-47: left eye
        middleEye = (rightEyeMean + leftEyeMean) * 0.5
        chin = imagePoints[8]
        # cv2.circle(frame, tuple(rightEyeMean[:2].astype(int)), 30, (255, 255, 0))
        # cv2.circle(frame, tuple(leftEyeMean[:2].astype(int)), 30, (255, 0, 255))
        # Compute the chip center and up/side vectors
        mean = middleEye[:2]  # ((middleEye * 3) + chin) * 0.25
        # centered = imagePoints - mean
        upVector = (chin - middleEye)[:2] * 1.2
        rightVector = np.array([upVector[1], -upVector[0]])

        # Compute the corners of the facial chip
        imageCorners = np.float32([(mean + (-rightVector - upVector)),
                                   (mean + ( rightVector - upVector)),
                                   (mean + (-rightVector + upVector)),
                                   (mean + ( rightVector + upVector))])

        # Compute the Perspective Homography and extract the chip from the image
        chipMatrix = cv2.getPerspectiveTransform(imageCorners, chipCorners)
        chip = cv2.warpPerspective(frame, chipMatrix, (chipSize, chipSize))
        return chip
# img_path and new_img_path are placeholder variables: set them to the source image
# and to the destination path for the cropped face chip before running.
img = imread(img_path)
crop = cutout(img)
imwrite(new_img_path, crop)
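# A minimal batch-usage sketch (not from the original gist): crop every image found
# in a folder with the cutout() helper above. The directory names 'faces_in' and
# 'faces_out' are placeholders assumed for illustration. When cutout() is given a
# path, it reads the image with skimage.io (RGB), while cv2.imwrite expects BGR,
# hence the channel swap before saving.
import os

in_dir, out_dir = 'faces_in', 'faces_out'
os.makedirs(out_dir, exist_ok=True)
for name in sorted(os.listdir(in_dir)):
    chip = cutout(os.path.join(in_dir, name))
    if chip is None:
        continue  # no face was detected in this image
    imwrite(os.path.join(out_dir, name), cv2.cvtColor(chip, cv2.COLOR_RGB2BGR))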