// ----------------------------------------------------------------------------------------
// ofxDLib test gist by @antimodular, created February 15, 2016
// https://gist.github.com/antimodular/25b58df209e20b0bd541
// ----------------------------------------------------------------------------------------
// main.cpp
#include "ofMain.h"
#include "ofApp.h"
//========================================================================
int main( ){
ofSetupOpenGL(1024,768,OF_WINDOW); // <-------- setup the GL context
// this kicks off the running of my app
// can be OF_WINDOW or OF_FULLSCREEN
// pass in width and height too:
ofRunApp(new ofApp());
}
// ----------------------------------------------------------------------------------------
// ofApp.cpp
// http://blog.dlib.net/2014/08/real-time-face-pose-estimation.html
// https://forum.openframeworks.cc/t/face-tracking-with-dlib/22082

#include "ofApp.h"

//--------------------------------------------------------------
void ofApp::setup(){
    faceTracker.setup(1);
#ifdef USE_VIDEO_GRABBER
    video.setDeviceID(0);
//    video.setup(720, 480);
    video.setup(320, 240);
#endif
    bSelecting = false;
}
//--------------------------------------------------------------
void ofApp::update(){
    ofSetWindowTitle(ofToString(ofGetFrameRate()));
#ifdef USE_VIDEO
    video.update();
    if(video.isFrameNew()){
//        faceTracker.findFaces(video.getPixels());
        faceTracker.findSelection(video.getPixels());
    }
#endif
}
//--------------------------------------------------------------
void ofApp::draw(){
    ofSetColor(255);
#ifdef USE_VIDEO
    video.draw(0, 0);
    videoRect = ofRectangle(0, 0, video.getWidth(), video.getHeight());
#endif
    if(bSelecting){
        ofPushStyle();
        ofSetColor(255, 0, 0);
        ofNoFill();
        ofDrawRectangle(ofRectangle(startPoint, endPoint));
        ofPopStyle();
    }
    faceTracker.draw();
}
//--------------------------------------------------------------
void ofApp::keyPressed(int key){
}
//--------------------------------------------------------------
void ofApp::keyReleased(int key){
}
//--------------------------------------------------------------
void ofApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
void ofApp::mouseDragged(int x, int y, int button){
    endPoint = ofPoint(x, y);
}
//--------------------------------------------------------------
void ofApp::mousePressed(int x, int y, int button){
    startPoint = ofPoint(x, y);
    bSelecting = true;
}
//--------------------------------------------------------------
void ofApp::mouseReleased(int x, int y, int button){
    endPoint = ofPoint(x, y);
    // only hand the dragged rectangle to the tracker if it lies fully inside the video
    if(videoRect.inside(startPoint) && videoRect.inside(endPoint)){
        faceTracker.setNewSelection(ofRectangle(startPoint, endPoint));
    }
    bSelecting = false;
}
//--------------------------------------------------------------
void ofApp::mouseEntered(int x, int y){
}
//--------------------------------------------------------------
void ofApp::mouseExited(int x, int y){
}
//--------------------------------------------------------------
void ofApp::windowResized(int w, int h){
}
//--------------------------------------------------------------
void ofApp::gotMessage(ofMessage msg){
}
//--------------------------------------------------------------
void ofApp::dragEvent(ofDragInfo dragInfo){
}
// ----------------------------------------------------------------------------------------
// ofApp.h

#pragma once
#include "ofMain.h"
#include "ofxDLib.h"
#define USE_VIDEO_GRABBER
#define USE_VIDEO
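// The input source is chosen at compile time:
// - USE_VIDEO + USE_VIDEO_GRABBER (the defaults above): live camera frames via ofVideoGrabber
// - USE_VIDEO only: a video file via ofVideoPlayer (setup()/update() would still need a load call)
// - neither: a still ofImage (ofImg), which would likewise need to be loaded and fed by hand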
class ofApp : public ofBaseApp{
public:
    void setup();
    void update();
    void draw();

    void keyPressed(int key);
    void keyReleased(int key);
    void mouseMoved(int x, int y);
    void mouseDragged(int x, int y, int button);
    void mousePressed(int x, int y, int button);
    void mouseReleased(int x, int y, int button);
    void mouseEntered(int x, int y);
    void mouseExited(int x, int y);
    void windowResized(int w, int h);
    void dragEvent(ofDragInfo dragInfo);
    void gotMessage(ofMessage msg);

    ofxDLib faceTracker;

#ifdef USE_VIDEO
#ifdef USE_VIDEO_GRABBER
    ofVideoGrabber video;
#else
    ofVideoPlayer video;
#endif
#else
    ofImage ofImg;
#endif

    ofPoint startPoint, endPoint;
    ofRectangle videoRect;
    bool bSelecting;
};
// ----------------------------------------------------------------------------------------
//
// ofxDLib.cpp
// DLibTest
//
// Created by Stephan Schulz on 2016-02-12.
//
//
#include "ofMain.h"
#include "ofxDLib.h"
ofRectangle toOf(const dlib::rectangle& r){
    return ofRectangle(r.left(), r.top(), r.width(), r.height());
}

ofPoint toOf(const dlib::point& p){
    return ofPoint(p.x(), p.y(), p.z());
}
bool toDLib(const ofPixels& inPix, array2d<rgb_pixel>& outPix){
    int width = inPix.getWidth();
    int height = inPix.getHeight();
    outPix.set_size(height, width);
    int chans = inPix.getNumChannels();
    const unsigned char* data = inPix.getData();
    for(int n = 0; n < height; n++){
        const unsigned char* v = &data[n * width * chans];
        for(int m = 0; m < width; m++){
            if(chans == 1){
                unsigned char p = v[m];
                assign_pixel(outPix[n][m], p);
            }else{
                // copy the first three channels; stepping by the pixel stride (chans)
                // keeps this correct for 4-channel (RGBA) input as well
                rgb_pixel p;
                p.red   = v[m * chans];
                p.green = v[m * chans + 1];
                p.blue  = v[m * chans + 2];
                assign_pixel(outPix[n][m], p);
            }
        }
    }
//    if(inPix.getNumChannels() == 3){
//        int h = inPix.getHeight();
//        int w = inPix.getWidth();
//        outPix.clear();
//        outPix.set_size(h, w);
//        for(int i = 0; i < h; i++){
//            for(int j = 0; j < w; j++){
//                outPix[i][j].red = inPix.getColor(j, i).r;
//                outPix[i][j].green = inPix.getColor(j, i).g;
//                outPix[i][j].blue = inPix.getColor(j, i).b;
//            }
//        }
//        return true;
//    }else{
//        return false;
//    }
    return true;
}
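// Not part of the original gist: a minimal sketch of the reverse conversion
// (dlib image -> ofPixels), e.g. for visually checking what toDLib() produced.
// The function name is illustrative only; it assumes an RGB dlib image.
void toOfPixels(const array2d<rgb_pixel>& inImg, ofPixels& outPix){
    int width = inImg.nc();   // dlib: nc() = number of columns, nr() = number of rows
    int height = inImg.nr();
    outPix.allocate(width, height, OF_PIXELS_RGB);
    unsigned char* data = outPix.getData();
    for(int n = 0; n < height; n++){
        for(int m = 0; m < width; m++){
            data[(n * width + m) * 3 + 0] = inImg[n][m].red;
            data[(n * width + m) * 3 + 1] = inImg[n][m].green;
            data[(n * width + m) * 3 + 2] = inImg[n][m].blue;
        }
    }
}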
ofxDLib::ofxDLib(){
}

ofxDLib::~ofxDLib(){
    //close();
}
void ofxDLib::setup(int _type){ // _type is currently unused
    uint64_t st = ofGetElapsedTimeMillis();
#ifdef USE_FACETRACKER
    detector = get_frontal_face_detector();
    cout << "get_frontal_face_detector: " << ofGetElapsedTimeMillis() - st << " ms." << endl;

    // http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
    ofFile f(ofToDataPath("shape_predictor_68_face_landmarks.dat"));
    if(f.exists()){
        deserialize(f.getAbsolutePath()) >> shape_predictor_object;
    }else{
        cout << "SHAPE PREDICTOR DAT FILE MISSING!!!" << endl;
    }
    bDetect = true;
#endif
#ifdef USE_OBJECTTRACKER
    selectionTrackerInit = false;
    sel_x = 0;
    sel_y = 0;
    sel_w = 38;
    sel_h = 86;
#endif
}

void ofxDLib::update(){
}
#ifdef USE_OBJECTTRACKER
void ofxDLib::setNewSelection(ofRectangle _rect){
    selectionTrackerInit = false;
    sel_x = _rect.getX();
    sel_y = _rect.getY();
    sel_w = _rect.getWidth();
    sel_h = _rect.getHeight();
}

void ofxDLib::findSelection(const ofPixels& pixels){
    array2d<rgb_pixel> img;
    if(toDLib(pixels, img)){
        if(selectionTrackerInit == false){
            selectionTrackerInit = true;
            // start a new correlation track on the rectangle the user selected
            // (dlib's example starts its track on a juice box 38 px wide and 86 px tall)
            cout << "selection: " << sel_x << " " << sel_y << " " << sel_w << " " << sel_h << endl;
            tracker.start_track(img, rectangle(sel_x, sel_y, sel_x + sel_w, sel_y + sel_h));
            // tracker.start_track(img, centered_rect(point(sel_x, sel_y), sel_w, sel_h));
        }else{
            tracker.update(img);
            myObject.rect = toOf(tracker.get_position());
        }
    }
}
#endif
#ifdef USE_FACETRACKER
//--------------------------------------------------------------
void ofxDLib::findFaces(const ofPixels& pixels){
    int64_t st = ofGetElapsedTimeMillis();
    int64_t st2 = st;

    array2d<rgb_pixel> img;
    if(toDLib(pixels, img)){
//        cout << "OfPixels to DlibPixels: " << ofGetElapsedTimeMillis() - st << " ms." << endl;

        // Make the image larger so we can detect small faces.
        st = ofGetElapsedTimeMillis();
        //pyramid_up(img);
//        cout << "PyramidUp: " << ofGetElapsedTimeMillis() - st << " ms." << endl;

        // Now tell the face detector to give us a list of bounding boxes
        // around all the faces in the image.
        if(bDetect){
            allFaces.clear();
            dets.clear();
            st = ofGetElapsedTimeMillis();
            dets = detector(img);
//            cout << "face detector: " << ofGetElapsedTimeMillis() - st << " ms." << endl;
//            cout << "Number of faces detected: " << dets.size() << endl;
            allFaces.resize(dets.size());

            // Now we will go ask the shape_predictor to tell us the pose of
            // each face we detected.
            shapes.clear();
            st = ofGetElapsedTimeMillis();
            for(unsigned long j = 0; j < dets.size(); ++j){
                full_object_detection shape = shape_predictor_object(img, dets[j]);
//                cout << "number of parts: " << shape.num_parts() << endl;
//                cout << "pixel position of first part: " << shape.part(0) << endl;
//                cout << "pixel position of second part: " << shape.part(1) << endl;
                // You get the idea, you can get all the face part locations if
                // you want them. Here we just store them in shapes so we can
                // put them on the screen.
                shapes.push_back(shape);

                allFaces[j].leftEyeCenter = getLeftEyeCenter(shape, 0);
                allFaces[j].rightEyeCenter = getRightEyeCenter(shape, 0);
                allFaces[j].rect = toOf(dets[j]);

                // 0 = jaw
                // 1 = nose bridge
                // 2 = left eyebrow
                // 3 = right eyebrow
                // 4 = under nose + tip
                // 5 = left eye
                // 6 = right eye
                // 7 = outer mouth
                // 8 = inner mouth
                allFaces[j].jaw = getOneFeature(shape, 0);
                allFaces[j].noseBridge = getOneFeature(shape, 1);
                allFaces[j].leftEyebrow = getOneFeature(shape, 2);
                allFaces[j].rightEyebrow = getOneFeature(shape, 3);
                allFaces[j].noseTip = getOneFeature(shape, 4);
                allFaces[j].leftEye = getOneFeature(shape, 5);
                allFaces[j].rightEye = getOneFeature(shape, 6);
                allFaces[j].outerMouth = getOneFeature(shape, 7);
                allFaces[j].innerMouth = getOneFeature(shape, 8);
            }
//            allFaces.push_back({0,0,});
//            if(shapes.size() > 0){
//                leftEyePos = getLeftEyeCenter(shapes, 0);
//                rightEyePos = getRightEyeCenter(shapes, 0);
//                oneFeature = getOneFeature(shapes, 0, 0);
//            }
//            get_interocular_distances(shapes);
//            std::vector<float> = render_face_detections(shapes);
//            cout << "full_object_detection: " << ofGetElapsedTimeMillis() - st << " ms." << endl;

//            win.clear_overlay();
//            win.set_image(img);
//            win.add_overlay(render_face_detections(shapes));
//
//            // We can also extract copies of each face that are cropped, rotated upright,
//            // and scaled to a standard size as shown here:
//            dlib::array<array2d<rgb_pixel> > face_chips;
//            extract_image_chips(img, get_face_chip_details(shapes), face_chips);
//            win_faces.set_image(tile_images(face_chips));
        }
    }
//    cout << "findFaces: " << ofGetElapsedTimeMillis() - st2 << " ms." << endl;
//    cout << "-------------------------------" << endl;
}
#endif
void ofxDLib::draw(){
#ifdef USE_FACETRACKER
    ofPushStyle();
//    ofSetColor(ofColor::yellow);
//    ofSetLineWidth(1);
//    ofNoFill();
//    for(auto& r : dets){
//        ofDrawRectangle(toOf(r));
//    }
//    ofFill();
//    ofSetColor(ofColor::yellow);
//    for(auto& s : shapes){
//        for(int i = 0; i < s.num_parts(); i++){
//            ofDrawCircle(toOf(s.part(i)), 3);
//        }
//    }
    for(int i = 0; i < allFaces.size(); i++){
        ofSetColor(ofColor::blue);
        ofNoFill();
        ofDrawRectangle(allFaces[i].rect);

        ofSetColor(ofColor::red);
        ofDrawCircle(allFaces[i].leftEyeCenter, 5);
        ofSetColor(ofColor::blue);
        ofDrawCircle(allFaces[i].rightEyeCenter, 5);

        ofSetColor(ofColor::white);
        allFaces[i].leftEye.draw();
        allFaces[i].rightEye.draw();
        allFaces[i].noseBridge.draw();
        allFaces[i].noseTip.draw();
        allFaces[i].jaw.draw();
        allFaces[i].innerMouth.draw();
        allFaces[i].outerMouth.draw();
        allFaces[i].rightEyebrow.draw();
        allFaces[i].leftEyebrow.draw();
    }
//    ofSetColor(ofColor::red);
//    ofDrawCircle(leftEyePos, 5);
//    ofSetColor(ofColor::blue);
//    ofDrawCircle(rightEyePos, 5);
//
//    ofSetColor(ofColor::white);
//    oneFeature.draw();
    ofPopStyle();
#endif
#ifdef USE_OBJECTTRACKER
    ofSetColor(ofColor::blue);
    ofNoFill();
    ofDrawRectangle(myObject.rect);
#endif
}
//--------------------------------------------------------------
#ifdef USE_FACETRACKER
ofVec2f ofxDLib::getLeftEyeCenter(const full_object_detection& d, unsigned long _faceIndex){
    double cnt = 0;
    ofVec2f tempLeft;
    // Find the center of the left eye by averaging the points around
    // the eye (parts 36-41 of the 68-point model).
    for(unsigned long i = 36; i <= 41; ++i){
        tempLeft += ofVec2f(d.part(i).x(), d.part(i).y());
        ++cnt;
    }
    tempLeft /= cnt;
    return tempLeft;
}

ofVec2f ofxDLib::getRightEyeCenter(const full_object_detection& d, unsigned long _faceIndex){
    double cnt = 0;
    ofVec2f tempRight;
    // Find the center of the right eye by averaging the points around
    // the eye (parts 42-47 of the 68-point model).
    for(unsigned long i = 42; i <= 47; ++i){
        tempRight += ofVec2f(d.part(i).x(), d.part(i).y());
        ++cnt;
    }
    tempRight /= cnt;
    return tempRight;
}
ofPolyline ofxDLib::getOneFeature(const full_object_detection& d, int _featureId){
    // builds all nine feature polylines from the 68-point shape and returns the requested one
    std::vector<ofPolyline> lines;
    ofPolyline temp_pLine;

    // id 0 = jaw (parts 0-16)
    temp_pLine.clear();
    for(unsigned long i = 0; i <= 16; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    lines.push_back(temp_pLine);

    // id 1 = nose bridge (parts 27-30; the original loop started at 28 and skipped part 27)
    temp_pLine.clear();
    for(unsigned long i = 27; i <= 30; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    lines.push_back(temp_pLine);

    // id 2 = left eyebrow (parts 17-21; the original loop started at 18 and skipped part 17)
    temp_pLine.clear();
    for(unsigned long i = 17; i <= 21; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    lines.push_back(temp_pLine);

    // id 3 = right eyebrow (parts 22-26)
    temp_pLine.clear();
    for(unsigned long i = 22; i <= 26; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    lines.push_back(temp_pLine);

    // id 4 = under nose + nose tip (parts 30-35, closed back to 30)
    temp_pLine.clear();
    for(unsigned long i = 30; i <= 35; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    temp_pLine.addVertex(ofPoint(d.part(30).x(), d.part(30).y()));
    lines.push_back(temp_pLine);

    // id 5 = left eye (parts 36-41, closed back to 36)
    temp_pLine.clear();
    for(unsigned long i = 36; i <= 41; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    temp_pLine.addVertex(ofPoint(d.part(36).x(), d.part(36).y()));
    lines.push_back(temp_pLine);

    // id 6 = right eye (parts 42-47, closed back to 42)
    temp_pLine.clear();
    for(unsigned long i = 42; i <= 47; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    temp_pLine.addVertex(ofPoint(d.part(42).x(), d.part(42).y()));
    lines.push_back(temp_pLine);

    // id 7 = outer mouth (parts 48-59, closed back to 48)
    temp_pLine.clear();
    for(unsigned long i = 48; i <= 59; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    temp_pLine.addVertex(ofPoint(d.part(48).x(), d.part(48).y()));
    lines.push_back(temp_pLine);

    // id 8 = inner mouth (parts 60-67, closed back to 60)
    temp_pLine.clear();
    for(unsigned long i = 60; i <= 67; ++i){
        temp_pLine.addVertex(ofPoint(d.part(i).x(), d.part(i).y()));
    }
    temp_pLine.addVertex(ofPoint(d.part(60).x(), d.part(60).y()));
    lines.push_back(temp_pLine);

    return lines[_featureId];
}
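
// Not part of the original addon: a minimal sketch of the interocular distance
// hinted at by the commented-out get_interocular_distances() call in findFaces().
// Assumes the 68-point model; the name is illustrative and not declared in ofxDLib.h.
float interocularDistance(const full_object_detection& d){
    ofVec2f left, right;
    for(unsigned long i = 36; i <= 41; ++i) left  += ofVec2f(d.part(i).x(), d.part(i).y());
    for(unsigned long i = 42; i <= 47; ++i) right += ofVec2f(d.part(i).x(), d.part(i).y());
    left  /= 6.0f;
    right /= 6.0f;
    return left.distance(right);
}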
#endif
//
// ofxDLib.h
// DLibTest
//
// Created by Stephan Schulz on 2016-02-12.
//
//
#ifndef __DLibTest__ofxDLib__
#define __DLibTest__ofxDLib__
//#define USE_FACETRACKER
#define USE_OBJECTTRACKER
#include "ofMain.h"
// drop any SHIFT macro picked up from other headers so it cannot collide with dlib
#ifdef SHIFT
#undef SHIFT
#endif
#ifdef USE_FACETRACKER
#include <dlib/image_processing/frontal_face_detector.h>
#include "dlib/image_processing/render_face_detections.h"
#endif
#include "dlib/image_processing.h"
#include "dlib/gui_widgets.h"
#include "dlib/image_io.h"
#include <stdio.h>
#include <vector>
using namespace dlib; // note: this using-directive in a header pulls dlib names into every file that includes it
// per-face result; label/age/used are bookkeeping placeholders that are not yet filled in
typedef struct{
    int label;
    int age;
    ofRectangle rect;
    bool used;
    ofVec2f leftEyeCenter;
    ofVec2f rightEyeCenter;
    ofPolyline leftEye, rightEye, innerMouth, outerMouth, leftEyebrow, rightEyebrow, jaw, noseBridge, noseTip;
} face;

// single tracked object for the correlation tracker; only rect is currently updated
typedef struct{
    int label;
    int age;
    ofRectangle rect;
    bool used;
} oneObject;
class ofxDLib {
public:
    ofxDLib();
    virtual ~ofxDLib();

    void setup(int _type);
    void update();
    void draw();
    void findFaces(const ofPixels& pixels);

#ifdef USE_FACETRACKER
    bool bDetect;
    frontal_face_detector detector;
    shape_predictor shape_predictor_object;
    std::vector<rectangle> dets;
    std::vector<full_object_detection> shapes;

    ofVec2f getRightEyeCenter(const full_object_detection& d, unsigned long _faceIndex);
    ofVec2f getLeftEyeCenter(const full_object_detection& d, unsigned long _faceIndex);
    ofPolyline getOneFeature(const full_object_detection& d, int _featureId);
//    image_window win, win_faces;

    std::vector<face> allFaces;
#endif

#ifdef USE_OBJECTTRACKER
    //-------- selected object tracking ----
    correlation_tracker tracker;
    void findSelection(const ofPixels& pixels);
    void setNewSelection(ofRectangle _rect);
    bool selectionTrackerInit;
    oneObject myObject;
    int sel_x, sel_y, sel_w, sel_h;
#endif
};
// Leftover note from dlib's face_landmark_detection example: the 68-landmark model used by
// the face tracker can be downloaded from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
#endif /* defined(__DLibTest__ofxDLib__) */
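// Build note (not part of the original gist): the project assumes dlib is on the include and
// link path, and with USE_FACETRACKER enabled the shape_predictor_68_face_landmarks.dat file
// must sit where ofToDataPath() resolves (bin/data in a default openFrameworks project).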