Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
restricted face compare
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
//
// try to find a frontal face 1st, if that fails, fall back to profile
//
bool findFace(const Mat &in, Mat &res, bool &isProfile, CascadeClassifier &casc, CascadeClassifier &casc_p)
{
isProfile = false;
vector<Rect> rects;
casc.detectMultiScale(in, rects, 1.3, 4);
if (rects.size() > 0)
{
res = in(rects[0]);
return true;
}
// left side profile
casc_p.detectMultiScale(in, rects, 1.3, 4);
if (rects.size() > 0)
{
cerr << "profile_l" << endl;
res = in(rects[0]);
isProfile=true;
return true;
}
// right side profile:
flip(in,in,1); // horizontal
casc_p.detectMultiScale(in, rects, 1.3, 4);
if (rects.size() > 0)
{
cerr << "profile_r" << endl;
res = in(rects[0]);
isProfile=true;
return true;
}
res = in;
return false; // no face found
}
//
// try to align the img, so the eyes are on a horizontal axis and
// scale to some fixed eye-distance.
//
// if you rotate an image, you need some 'room to move', so the img should be resized
// to something larger before, then we can crop it later again.
// 250x250 for the input img, and crop to 100x100 worked best for me.
//
Mat align2d(const Mat &img, Point eye_l, Point eye_r, int crop=100, double eyed=44.0)
{
    // Rotate the image so the eyes lie on a horizontal axis, and scale it
    // so the eye distance matches a fixed value (LFW-style alignment),
    // then crop a (crop x crop) patch around the image center.
    //
    // img   : face image (should be larger than `crop`, e.g. 250x250,
    //         to leave room for the rotation — see note above)
    // eye_l : left pupil position,  eye_r : right pupil position
    // crop  : side length of the returned square patch
    // eyed  : target eye distance in pixels after scaling
    double eyeXdis = eye_r.x - eye_l.x;
    double eyeYdis = eye_r.y - eye_l.y;
    // atan2 instead of atan(dy/dx): no division by zero when the eyes are
    // vertically aligned, and the correct quadrant regardless of sign.
    double degree = atan2(eyeYdis, eyeXdis) * 180.0 / CV_PI;
    // Scale by the Euclidean eye distance: after rotation the horizontal
    // eye separation *is* the full distance, so scaling by eyeXdis alone
    // would under-scale tilted faces (and divide by zero for vertical ones).
    double dist = sqrt(eyeXdis * eyeXdis + eyeYdis * eyeYdis);
    double scale = (dist > 0) ? eyed / dist : 1.0; // scale to lfw eye distance
    Mat res;
    Point2f center(img.cols / 2, img.rows / 2);
    Mat rot = getRotationMatrix2D(center, degree, scale);
    // BORDER_CONSTANT with mid-gray fills the corners exposed by rotation
    warpAffine(img, res, rot, Size(), INTER_CUBIC, BORDER_CONSTANT, Scalar(127));
    if (0) // visualization
    {
        Mat t = img.clone();
        circle(t, eye_l, 3, Scalar(255));
        circle(t, eye_r, 3, Scalar(255));
        imshow("test2", t);
        imshow("testr", res);
    }
    return res(Rect(center.x - crop / 2, center.y - crop / 2, crop, crop));
}
//
// i'll use the eye_cascade to find the eye pos here for simplicity
//
Mat preproc(const Mat &img, CascadeClassifier &cc, CascadeClassifier &cc_p, CascadeClassifier &cc_e, int fixed=100)
{
    // Detect a face in `img`, align frontal faces on the eye axis, and
    // return a (fixed x fixed) crop suitable for later comparison.
    //
    // cc    : frontal-face cascade
    // cc_p  : profile-face cascade
    // cc_e  : eye cascade (used here for simplicity to locate the pupils)
    // fixed : side length of the returned square image
    // On detection failure, returns the unmodified input image (as before)
    // — NOTE(review): that early return skips the final resize, so the
    // caller should treat a non-(fixed x fixed) result as "no face".
    Mat res;
    bool isProfile = false;
    //if ( ! findFaceRot(img,res,isProfile, cc,cc_p) )
    if (!findFace(img, res, isProfile, cc, cc_p))
    {
        return res;
    }
    if (!isProfile)
    {
        // 250x250 gives align2d room to rotate before the final crop.
        // BUG FIX: the original passed INTER_CUBIC as the `fx` argument
        // (which is ignored when dsize is set), silently falling back to
        // INTER_LINEAR; the flag belongs in the 6th parameter.
        resize(res, res, Size(250, 250), 0, 0, INTER_CUBIC);
        vector<Rect> eyes;
        cc_e.detectMultiScale(res, eyes);
        if (eyes.size() == 2)
        {
            // pupil ~= center of each detected eye rect
            Point e0 = eyes[0].tl() + (eyes[0].br() - eyes[0].tl()) / 2;
            Point e1 = eyes[1].tl() + (eyes[1].br() - eyes[1].tl()) / 2;
            // pass the eyes left-to-right so the rotation sign is correct
            if (e0.x < e1.x)
                return align2d(res, e0, e1, fixed, 44.0);
            else
                return align2d(res, e1, e0, fixed, 44.0);
        }
    }
    // all images must have same size for comparison later.
    resize(res, res, Size(fixed, fixed));
    return res;
}
//
// if you decide to use dlib to find the eyes, some additional snippets:
//
//dlib::shape_predictor sp;
//
//// it needs a pretrained model, and it's only 95 mb ;)
//dlib::deserialize("D:/Temp/dlib-18.10/examples/shape_predictor_68_face_landmarks.dat") >> sp;
//
//
////! expects grayscale img
//void getkp2d(const Mat &I, vector<Point2d> &pts2d, const Rect &r) const
//{
// dlib::rectangle rec(r.x, r.y, r.x+r.width, r.y+r.height);
// dlib::full_object_detection shape = sp(dlib::cv_image<uchar>(I), rec);
//
// for(size_t k=0; k<shape.num_parts(); k++) // 68 parts
// {
// Point2d p(shape.part(k).x(), shape.part(k).y());
// pts2d.push_back(p);
// }
//}
//
//// get landmarks
//vector<Point2d> pts2d;
//getkp2d(test, pts2d, Rect(0, 0, test.cols, test.rows));
//
//// interpolate pupil position
//Point2d eye_l = (pts2d[37] + pts2d[38] + pts2d[40] + pts2d[41]) * 0.25; // left eye center
//Point2d eye_r = (pts2d[43] + pts2d[44] + pts2d[46] + pts2d[47]) * 0.25; // right eye center
//
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.