Skip to content

Instantly share code, notes, and snippets.

@sarmadm
Last active January 12, 2017 17:43
Show Gist options
  • Save sarmadm/eda20d68650120d3d7f48cf73f7e3bc3 to your computer and use it in GitHub Desktop.
#include <cmath>
#include <fstream>
#include <iostream>
#include <vector>

#include <opencv2/highgui/highgui.hpp>

#include <dlib/opencv.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
using namespace dlib;
using namespace std;
int main()
{
try
{
cv::VideoCapture cap("1.avi");
if (!cap.isOpened())
{
cerr << "Unable to connect to camera" << endl;
return 1;
}
image_window win;
// int nframes = cap.get(CAP_PROP_FRAME_COUNT);
// cout<<"Frame count _cap_prop_frame_"<<nframes<<endl;
frontal_face_detector detector = get_frontal_face_detector();
shape_predictor pose_model;
deserialize("shape_predictor_68_face_landmarks.dat") >> pose_model;
ofstream EAR_OUTPUT_left_EYE;
ofstream EAR_OUTPUT_right_EYE;
EAR_OUTPUT_left_EYE.open("EAR_LEFT.csv");
EAR_OUTPUT_right_EYE.open("EAR_RIGHT.csv");
// double nframes = video.get(CAP_PROP_FRAME_COUNT);
int c=0;
while(!win.is_closed())
{
// Grab a frame
cv::Mat temp;
cap >> temp;
if ( temp.empty())
{
// reach to the end of the video file
break;
}
c++;
std::cout<<"count ="<<c<<endl;
cv_image<bgr_pixel> cimg(temp);
std::vector<rectangle> faces = detector(cimg);
std::vector<full_object_detection> shapes;
for (unsigned long i = 0; i < faces.size(); ++i)
{
full_object_detection shape = pose_model(cimg, faces[i]);
cout << "number of parts: "<< shape.num_parts() << endl;
cout << "Eye Landmark points for right eye : "<< endl;
cout << "pixel position of 36 part: " << shape.part(36) << endl;
cout << "pixel position of 37 part: " << shape.part(37) << endl;
cout << "pixel position of 38 part: " << shape.part(38) << endl;
cout << "pixel position of 39 part: " << shape.part(39) << endl;
cout << "pixel position of 40 part: " << shape.part(40) << endl;
cout << "pixel position of 41 part: " << shape.part(41) << endl;
cout << endl;
cout << "Eye Landmark points for left eye : "<< endl;
cout << "pixel position of 42 part: " << shape.part(42) << endl;
cout << "pixel position of 43 part: " << shape.part(43) << endl;
cout << "pixel position of 44 part: " << shape.part(44) << endl;
cout << "pixel position of 45 part: " << shape.part(45) << endl;
cout << "pixel position of 46 part: " << shape.part(46) << endl;
cout << "pixel position of 47 part: " << shape.part(47) << endl;
double P37_41_x = shape.part(37).x() - shape.part(41).x();
double P37_41_y= shape.part(37).y() -shape.part(41).y() ;
double p37_41_sqrt=sqrt((P37_41_x * P37_41_x) + (P37_41_y * P37_41_y));
double P38_40_x = shape.part(38).x() - shape.part(40).x();
double P38_40_y = shape.part(38).y() - shape.part(40).y();
double p38_40_sqrt=sqrt((P38_40_x * P38_40_x) + (P38_40_y * P38_40_y));
double P36_39_x = shape.part(36).x() - shape.part(39).x();
double P36_39_y = shape.part(36).y() - shape.part(39).y();
double p36_39_sqrt=sqrt((P36_39_x * P36_39_x) + (P36_39_y * P36_39_y));
double EAR_R= (p37_41_sqrt + p38_40_sqrt)/(2* p36_39_sqrt);
cout << "EAR_RIGHT_EYE = " << EAR_R << endl;
double P43_47_x = shape.part(43).x() - shape.part(47).x();
double P43_47_y= shape.part(43).y() -shape.part(47).y() ;
double p43_47_sqrt=sqrt((P43_47_x * P43_47_x) + (P43_47_y * P43_47_y));
double P44_46_x = shape.part(44).x() - shape.part(46).x();
double P44_46_y = shape.part(44).y() - shape.part(46).y();
double p44_46_sqrt=sqrt((P44_46_x * P44_46_x) + (P44_46_y * P44_46_y));
double P42_45_x = shape.part(42).x() - shape.part(45).x();
double P42_45_y = shape.part(42).y() - shape.part(45).y();
double p42_45_sqrt=sqrt((P42_45_x * P42_45_x) + (P42_45_y * P42_45_y));
double EAR_L= (p43_47_sqrt+ p44_46_sqrt)/(2* p42_45_sqrt);
cout << "EAR_LEFT_EYE = " << EAR_L << endl;
shapes.push_back(pose_model(cimg, faces[i]));
const full_object_detection& d = shapes[0];
EAR_OUTPUT_left_EYE<<EAR_L<< endl;
EAR_OUTPUT_right_EYE<<EAR_R<< endl;
}
win.clear_overlay();
win.set_image(cimg);
win.add_overlay(render_face_detections(shapes));
}
EAR_OUTPUT_left_EYE.close();
EAR_OUTPUT_right_EYE.close();
}
catch(serialization_error& e)
{
cout << "You need dlib's default face landmarking model file to run this example." << endl;
cout << "You can get it from the following URL: " << endl;
cout << " http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2" << endl;
cout << endl << e.what() << endl;
}
catch(exception& e)
{
cout << e.what() << endl;
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment