Skip to content

Instantly share code, notes, and snippets.

@suna-pan
Last active August 29, 2015 14:17
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save suna-pan/6409121454beed4f02b1 to your computer and use it in GitHub Desktop.
目の大きさで女児アニメかどうかを判定するやつ
// 女児アニメかどうかを判定するやつ
//
// MITライセンス
//
// C++11
// OpenCV 2.4.9
//
// 使用した顔検出用学習データ
// lbpcascade_animeface
// https://github.com/nagadomi/lbpcascade_animeface
//
// コンパイル
// g++ joji_anime.cpp `pkg-config --libs --cflags opencv` -std=c++0x
// g++ joji.cpp -std=c++0x -lopencv_core249 -lopencv_highgui249 -lopencv_objdetect249 -lopencv_ml249 -lopencv_imgproc249
#define _USE_MATH_DEFINES
#include <cv.h>
#include <highgui.h>
#include <iostream>
#include <cmath>
#include <limits>
#include <array>
#include <vector>
#include <stack>
#include <list>
#include <memory>
#define CLUSTER_COUNT 6 // number of k-means clusters
#define COL_DEC 16 // palette size after median-cut color reduction
// Reference skin color (earlier candidate values kept below for reference)
/*
#define SKIN_R 238
#define SKIN_G 212
#define SKIN_B 194
#define SKIN_R 224
#define SKIN_G 224
#define SKIN_B 224
*/
#define SKIN_R 240
#define SKIN_G 240
#define SKIN_B 210
// Labels stored per pixel in the `face` grid
#define FACE_DONE 1
#define FACE_SKIN 2
#define FACE_HAIR 3
#define FACE_EYE 4
#define FACE_MASK 5
// Divisor for the tilt-estimation strip height (not a face label).
// NOTE(review): shares the value 5 with FACE_MASK — confirm that is intentional.
#define FACE_SLP 5
// Segmentation tuning constants (each is used as a divisor of a length/area)
#define HAIR_AREA 3
#define HAIR_COUNT 10
#define HAIR_LINE 4
#define EYE_WIDTH 15
#define EYE_ITV 15
using namespace std;
using namespace cv;
// Forward declarations
// -- face detection helpers
int getFaceRect(Mat &src, vector<Rect> &faces);
int getBiggestFaceRect(Mat &src, Rect &r);
int getNearestFaceRect(Mat &src, Rect &dst, Point &tgt);
// -- preprocessing / clustering
void prepareImage(Mat &dst_img, Mat &src_img);
void clustering(Mat &clusters, Mat &centers, Mat &img);
int getSkinCluster(Mat &centers);
// -- face layer (skin / hair / eye) bookkeeping
void initFaceVector(vector<vector<int>> &face, Mat &clusters);
void getSkinRect(vector<vector<int>> &face, Mat &clusters, Point &skin_lt, Point &skin_rb);
int correctFaceTilt(vector<vector<int>> &face, Point &skin_lt,
Point &skin_rb, Mat &img, Mat &src_img, Mat &img2, Point rot_o);
void fillSurroundedLayer(vector<vector<int>> &face, vector<int> layers, int tgt, Point rect_lt, Point rect_rb);
// -- color utilities and segmentation
double getColorDist(Scalar c1, Scalar c2);
void getAvColor(list<array<int, 3>> colors, array<int, 3> &dst);
int findBiggestArea(const Mat &clusters, vector<vector<int>> &face, int flag,
Point rect_lt, Point rect_rb, function<int(int)> istgt);
void medianCut(Mat &dst, Mat &src);
// Entry point.
// Usage: joji <image file>
// Detects the largest anime face, corrects its tilt, segments the face into
// skin / hair / eye layers via k-means clustering, and prints the eye/skin
// area ratio ("joji factor"); a ratio > 0.19 is reported as JOJI.
// Returns 0 on success, -1 on missing argument or when no face is found.
int main (int argc, char *argv[]) {
if(argc < 2) {
return -1;
}
// Load the source image file
const char *image_file = argv[1];
Mat src_img = imread(image_file, CV_LOAD_IMAGE_COLOR); // detection rectangles are drawn on this copy
Mat src_img2 = src_img.clone(); // copy without rectangles
Mat src_img3; // rotated (tilt-corrected) image
Mat face_img;
Rect face_r;
cerr << "@@@@ PATH 1" << endl;
// Face detection
cerr << "GetFace..." << endl;
if(!getBiggestFaceRect(src_img, face_r)) {
// No face found: show the source image and give up.
namedWindow("JOJI src", CV_WINDOW_AUTOSIZE);
imshow("JOJI src", src_img);
waitKey(0);
return -1;
}
face_img = src_img2(face_r).clone();
Mat img = Mat(face_img.size(), face_img.type()); // tilt-corrected face, color-reduced
Mat img2 = Mat(face_img.size(), face_img.type()); // tilt-corrected face, color-reduced
// First pass, run only far enough to detect the skin layer so the face
// tilt can be estimated.
vector<vector<int>> face; // per-pixel labels (skin / eye / hair), indexed [x][y]
// Preprocess (blur, normalize, color-reduce)
prepareImage(img, face_img);
// Clustering
cerr << "Clustering..." << endl;
Mat clusters, centers;
clustering(clusters, centers, img);
// Pick the cluster closest to the reference skin color
int skin_cls = getSkinCluster(centers);
// The biggest connected area of that cluster becomes the skin layer
cerr << "GetSkinLer..." << endl;
initFaceVector(face, clusters);
findBiggestArea(clusters, face, FACE_SKIN, Point(0, 0),
Point(clusters.cols, clusters.rows),
[&skin_cls](int x)->int { return x == skin_cls ? 1 : 0; });
// Bounding rectangle of the skin layer
Point skin_lt, skin_rb;
getSkinRect(face, clusters, skin_lt, skin_rb);
// Correct the face tilt
cerr << "CorrectFaceTilt..." << endl;
int cft = correctFaceTilt(face, skin_lt, skin_rb, img, src_img2, src_img3,
Point(face_r.x + face_r.width / 2, face_r.y + face_r.height / 2));
// Second pass on the rotation-corrected image (this one runs to the end)
cerr << "@@@@ PATH 2" << endl;
if(!cft) {
// correctFaceTilt produced a rotated image; redo detection and the
// skin extraction on it.
Point face_p = Point(face_r.x, face_r.y);
if(!getNearestFaceRect(src_img3, face_r, face_p)) {
namedWindow("JOJI src", CV_WINDOW_AUTOSIZE);
imshow("JOJI src", src_img);
namedWindow("JOJI src2", CV_WINDOW_AUTOSIZE);
imshow("JOJI src2", src_img3);
waitKey(0);
return -1;
}
face_img = src_img3(face_r).clone();
prepareImage(img, face_img);
// Clustering
cerr << "Clustering..." << endl;
clustering(clusters, centers, img);
// Pick the skin-color cluster again
skin_cls = getSkinCluster(centers);
// Largest connected skin-cluster area becomes the skin layer
cerr << "GetSkinLer..." << endl;
initFaceVector(face, clusters);
findBiggestArea(clusters, face, FACE_SKIN, Point(0, 0),
Point(clusters.cols, clusters.rows),
[&skin_cls](int x)->int { return x == skin_cls ? 1 : 0; });
// Bounding rectangle of the skin layer
getSkinRect(face, clusters, skin_lt, skin_rb);
}
// Build the hair layer from the clusters located above the skin layer:
// a cluster qualifies when it covers at least 1/HAIR_AREA of the region
// above the skin bounding rectangle.
cerr << "GetHairLayer..." << endl;
array<int, CLUSTER_COUNT> hair_count = {};
for(int i = skin_lt.x; i < skin_rb.x; i++) {
for(int j = 0; j < skin_lt.y; j++) {
// NOTE(review): every other linear cluster access in this file uses
// y * cols + x, but this one uses x * cols + y — it looks transposed;
// verify the intended indexing.
hair_count[clusters.at<int>(i * clusters.cols + j)]++;
}
}
int hair_area_bd = (skin_rb.x - skin_lt.x) * skin_lt.y / HAIR_AREA;
for(int i = 0; i < CLUSTER_COUNT; i++) {
if(hair_count[i] < hair_area_bd) {
hair_count[i] = -1; // too small: not hair
}
}
// Grow the hair layer the same way as the skin layer (repeat HAIR_COUNT times)
for(int i = 0; i < HAIR_COUNT; i++) {
findBiggestArea(clusters, face, FACE_HAIR, Point(0, 0),
Point(clusters.cols, clusters.rows),
[&hair_count](int x)->int {
int f = 0;
for(int i = 0; i < CLUSTER_COUNT; i++) {
if(hair_count[i] > 0 && x == i) f = 1;
}
return f;
});
}
// Lower the top edge of the skin rectangle to the first row where hair
// occupies no more than 1/HAIR_LINE of the row width
cerr << "GetEyesRect..." << endl;
Point new_skin_lt(skin_lt.x, skin_lt.y);
Point new_skin_rb(skin_rb.x, skin_rb.y);
hair_area_bd = (skin_rb.x - skin_lt.x) / HAIR_LINE;
for(int i = skin_lt.y; i < skin_rb.y; i++) {
int t = 0;
for(int j = skin_lt.x; j < skin_rb.x; j++) {
if(face[j][i] == FACE_HAIR)
t++;
}
if(t < hair_area_bd) {
new_skin_lt.y = i;
break;
}
}
// Raise the bottom edge to the first row (scanning upward) that shows the
// pattern non-skin / skin / non-skin inside the skin layer — i.e. a row
// crossing both eyes. f is a small state machine: 1 = first non-skin run,
// 2 = skin gap, 3 = second non-skin run, 4 = row accepted.
for(int i = skin_rb.y; i > new_skin_lt.y; i--) {
int f = 0, j, k;
// skip the non-skin margin left of the face outline
for(j = skin_lt.x; j < skin_rb.x && face[j][i] != FACE_SKIN; j++)
;
for(; j < skin_rb.x; j++) {
if(f == 0 && face[j][i] != FACE_SKIN) {
f = 1;
// the run must span at least 1/EYE_WIDTH of the rectangle width
for(k = (skin_rb.x - skin_lt.x) / EYE_WIDTH; j < skin_rb.x && k > 0 && face[j][i] != FACE_SKIN; k--, j++)
;
if(k) break;
}
if(f == 1 && face[j][i] == FACE_SKIN) {
f = 2;
// the gap must span at least 1/EYE_ITV of the rectangle width
for(k = (skin_rb.x - skin_lt.x) / EYE_ITV; j < skin_rb.x && k > 0 && face[j][i] == FACE_SKIN; k--, j++)
;
if(k) break;
}
if(f == 2 && face[j][i] != FACE_SKIN) {
f = 3;
for(k = (skin_rb.x - skin_lt.x) / EYE_WIDTH; j < skin_rb.x && k > 0 && face[j][i] != FACE_SKIN; k--, j++)
;
if(k) break;
}
if(f == 3 && face[j][i] == FACE_SKIN) {
f = 4;
new_skin_rb.y = i;
break;
}
}
if(f == 4)
break;
}
// Then push the bottom edge back down to just below the eyes: the first
// row whose single skin run reaches the right edge of the rectangle
for(int i = new_skin_rb.y; i < skin_rb.y; i++) {
int j, k;
for(j = skin_lt.x; j < skin_rb.x && face[j][i] != FACE_SKIN; j++)
;
for(; j < skin_rb.x && face[j][i] == FACE_SKIN; j++)
;
for(k = skin_rb.x; k > skin_lt.x && face[k][i] != FACE_SKIN; k--)
;
if(j == k + 1) {
new_skin_rb.y = i;
break;
}
}
// Mask the triangular non-face regions beside the eyes
// Left side
Point eye_tr_1, eye_tr_2, eye_tr_3;
eye_tr_1.x = new_skin_lt.x; eye_tr_1.y = new_skin_rb.y;
eye_tr_2.y = new_skin_rb.y; eye_tr_3.x = new_skin_lt.x;
for(eye_tr_2.x = new_skin_lt.x; eye_tr_2.x < new_skin_rb.x && face[eye_tr_2.x][new_skin_rb.y] != FACE_SKIN; eye_tr_2.x++)
;
for(eye_tr_3.y = new_skin_rb.y; eye_tr_3.y > new_skin_lt.y && face[new_skin_lt.x][eye_tr_3.y] != FACE_SKIN; eye_tr_3.y--)
;
// NOTE(review): integer division — the fractional part of the slope is
// lost, and eye_tr_2.x == eye_tr_1.x would divide by zero; confirm.
double slp = (eye_tr_3.y - eye_tr_1.y) / (eye_tr_2.x - eye_tr_1.x);
for(int i = eye_tr_1.x; i < eye_tr_2.x; i++){
for(int j = new_skin_rb.y; j > eye_tr_3.y - (i - eye_tr_1.x) * slp; j--) {
face[i][j] = FACE_MASK;
}
}
// Right side
eye_tr_1.x = new_skin_rb.x; eye_tr_1.y = new_skin_rb.y;
eye_tr_2.y = new_skin_rb.y; eye_tr_3.x = new_skin_rb.x;
for(eye_tr_2.x = new_skin_rb.x; eye_tr_2.x > new_skin_lt.x && face[eye_tr_2.x][new_skin_rb.y] != FACE_SKIN; eye_tr_2.x--)
;
// NOTE(review): this scans column new_skin_lt.x although it handles the
// right-hand side — new_skin_rb.x looks intended; verify.
for(eye_tr_3.y = new_skin_rb.y; eye_tr_3.y > new_skin_lt.y && face[new_skin_lt.x][eye_tr_3.y] != FACE_SKIN; eye_tr_3.y--)
;
slp = (eye_tr_3.y - eye_tr_1.y) / (eye_tr_2.x - eye_tr_1.x);
for(int i = eye_tr_1.x; i > eye_tr_2.x; i--){
for(int j = new_skin_rb.y; j > eye_tr_3.y - (i - eye_tr_1.x) * slp; j--) {
face[i][j] = FACE_MASK;
}
}
// Inside the refined rectangle, the two biggest areas that are neither
// skin nor hair are taken as the eye layers
cerr << "GetEyesLayer..." << endl;
for(int i = 0; i < 2; i++) {
findBiggestArea(clusters, face, FACE_EYE, new_skin_lt, new_skin_rb,
[&skin_cls](int x)->int {
int f = 1;
if(x == skin_cls) f = 0;
return f;
});
}
/*for(int i = new_skin_lt.y; i < new_skin_rb.y; i++) {
for(int j = new_skin_lt.x; j < new_skin_rb.x; j++) {
if(face[j][i] != FACE_SKIN && face[j][i] != FACE_MASK)
face[j][i] = FACE_EYE;
}
}*/
// Fill holes completely surrounded by the eye layer (highlights etc.)
fillSurroundedLayer(face, {FACE_EYE}, FACE_EYE, new_skin_lt, new_skin_rb);
// Build the visualization image while measuring the skin and eye areas
Mat d_img = Mat(face_img.size(), face_img.type()); // classification result
int skin_area = 0, eyes_area = 0;
int img_size = img.cols * img.rows;
for(int i = 0; i < img_size; i++) {
unsigned char r, g, b;
int x = i % img.cols, y = i / img.cols;
switch(face[x][y]) {
case FACE_SKIN:
r = g = b = 0; // skin: black
skin_area++;
break;
case FACE_HAIR:
r = g = b = 200; // hair: gray
break;
case FACE_EYE:
b = 255; // eyes: blue
g = r = 0;
eyes_area++;
break;
case FACE_MASK:
r = 255; // masked triangles: red
b = g = 60;
break;
default:
r = g = b = 255; // unclassified: white by default
int idx = y * clusters.cols + x;
if(clusters.at<int>(idx) == skin_cls) {
b = g = 150; // skin-cluster pixel outside the skin layer
} else {
for(int i = 0; i < CLUSTER_COUNT; i++) {
if(hair_count[i] > 0 && clusters.at<int>(idx) == i)
r = b = 150; // hair-cluster pixel outside the hair layer
}
}
break;
}
d_img.data[i * 3 + 0] = b;
d_img.data[i * 3 + 1] = g;
d_img.data[i * 3 + 2] = r;
}
// Draw the skin bounding rectangles (green: initial, red: refined)
rectangle(d_img, skin_lt, skin_rb, Scalar(0,255, 0), 2, 2);
rectangle(d_img, new_skin_lt, new_skin_rb, Scalar(0, 0, 255), 2, 2);
// Eye area as a fraction of (skin + eye) area
double joji_factor = (double)eyes_area / (double)(skin_area + eyes_area);
//string res_skin = "skin " + to_string(skin_area);
//string res_eyes = "eyes " + to_string(eyes_area);
//string res_joji_factor = "joji factor " + to_string(joji_factor);
cerr << endl;
cout << "skin " << skin_area << endl;
cout << "eyes " << eyes_area << endl;
cout << "joji factor " << joji_factor << endl;
if(joji_factor > 0.19) {
cout << "JOJI" << endl;
} else {
cout << "OTAKU" << endl;
}
// Show the results
namedWindow("JOJI src", CV_WINDOW_AUTOSIZE);
namedWindow("JOJI res1", CV_WINDOW_AUTOSIZE);
namedWindow("JOJI res2", CV_WINDOW_AUTOSIZE);
imshow("JOJI src", src_img);
imshow("JOJI res1", img);
imshow("JOJI res2", d_img);
waitKey(0);
destroyWindow("JOJI src");
destroyWindow("JOJI res1");
destroyWindow("JOJI res2");
centers.release();
clusters.release();
src_img.release();
img.release();
d_img.release();
return 0;
}
// Face detection.
// Detects anime faces in `src` and fills `faces` with one bounding
// rectangle per detection (minimum size 80x80).
// Returns 1 when at least one face was found, 0 otherwise — including when
// the cascade file could not be loaded.
int getFaceRect(Mat &src, vector<Rect> &faces) {
    Mat img;
    CascadeClassifier face_cascade;
    // BUG FIX: the original ignored the load result; with an empty cascade
    // detectMultiScale cannot work. Fail loudly instead.
    if(!face_cascade.load("lbpcascade_animeface.xml")) {
        cerr << "failed to load lbpcascade_animeface.xml" << endl;
        return 0;
    }
    // Detection runs on an equalized grayscale image.
    cvtColor(src, img, COLOR_BGR2GRAY );
    equalizeHist(img, img);
    face_cascade.detectMultiScale(img, faces, 1.1, 3, 0, Size(80, 80));
    cerr << "found : " << faces.size() << " faces" << endl;
    return faces.empty() ? 0 : 1;
}
// Detect faces and return the one with the largest area.
// Side effects on `src`: every detection is outlined in red and the winner
// in blue. Returns 1 on success, 0 when no face was found.
int getBiggestFaceRect(Mat &src, Rect &r) {
    vector<Rect> faces;
    if(!getFaceRect(src, faces)) {
        return 0;
    }
    // Pick the rectangle with the biggest area.
    Rect best;
    int best_area = 0;
    for(size_t k = 0; k < faces.size(); k++) {
        const Rect &f = faces[k];
        const int a = f.width * f.height;
        if(a > best_area) {
            best_area = a;
            best = f;
        }
    }
    // Outline every detection in red on src.
    for(size_t k = 0; k < faces.size(); k++) {
        const Rect &f = faces[k];
        rectangle(src, Point(f.x, f.y),
            Point(f.x + f.width, f.y + f.height),
            Scalar(0, 0, 255), 2, 2);
    }
    // Outline the largest detection in blue on src.
    rectangle(src, Point(best.x, best.y),
        Point(best.x + best.width, best.y + best.height),
        Scalar(255, 0, 0), 2, 2);
    r = Rect(best.x, best.y, best.width, best.height);
    return 1;
}
// Detect faces and return the one whose top-left corner is closest to `tgt`.
// Returns 1 on success, 0 when no face was found.
int getNearestFaceRect(Mat &src, Rect &dst, Point &tgt) {
    vector<Rect> faces;
    if(!getFaceRect(src, faces)) {
        return 0;
    }
    Rect nearest;
    // BUG FIX: the running minimum must be floating point; the original
    // stored the double distance in an int, truncating it and possibly
    // choosing the wrong face on near ties.
    double dist = numeric_limits<double>::max();
    for(vector<Rect>::iterator itr = faces.begin(); itr != faces.end(); ++itr) {
        double xd = tgt.x - itr->x; xd *= xd;
        double yd = tgt.y - itr->y; yd *= yd;
        double d = sqrt(xd + yd);
        if(d < dist) {
            dist = d;
            nearest = *itr;
        }
    }
    dst = Rect(nearest.x, nearest.y, nearest.width, nearest.height);
    return 1;
}
// Color reduction / normalization.
// Smooths, normalizes and color-reduces (median cut) `src_img` into
// `dst_img`. The result pipeline is: 2x2 box blur -> min-max normalize to
// 0..255 -> medianCut.
// FIX: the original blurred src_img in place (mutating the caller's buffer)
// and then copied the normalized image byte-by-byte into a temporary before
// handing it to medianCut; both are unnecessary — the caller only uses
// src_img for its size/type afterwards.
void prepareImage(Mat &dst_img, Mat &src_img) {
    Mat blurred(src_img.size(), src_img.type());
    Mat norm(src_img.size(), src_img.type());
    // Smoothing (into a scratch buffer, not in place)
    blur(src_img, blurred, Size(2, 2));
    // Normalization
    normalize(blurred, norm, 0, 255, NORM_MINMAX, CV_8UC3);
    // Color reduction writes the final image into dst_img
    medianCut(dst_img, norm);
}
// Count the size of a connected region.
// Flood-fills (iterative DFS, 4-connectivity) from (i, j) over cells whose
// cluster id satisfies `istgt`, limited to the rectangle [rect_lt, rect_rb).
// Visited cells are first marked FACE_DONE in face_flag, then overwritten
// with the final region size t so the caller can identify the region later.
// Returns the region size.
// NOTE(review): `clusters` is a 2D (rows x cols) matrix read with a single
// linear index via at<int>(y * cols + x); this relies on single-index Mat
// access behaving like linear element access — verify against the OpenCV
// 2.4 Mat::at semantics.
// NOTE(review): a cell can be pushed twice before it is marked FACE_DONE,
// in which case t overcounts slightly — confirm this is acceptable.
int countSameArea(const Mat &clusters, vector<vector<int>> &face_flag, int i, int j,
Point rect_lt, Point rect_rb, function<int(int)> istgt) {
int t = 0;
stack<Point> tgt_pos, patch_pos;
tgt_pos.push(Point(i, j));
// Count the connected cells
while(!tgt_pos.empty()) {
Point p = tgt_pos.top();
tgt_pos.pop();
patch_pos.push(Point(p.x, p.y));
face_flag[p.x][p.y] = FACE_DONE;
t++;
// push each of the 4 neighbours that is in range, unvisited,
// and belongs to a target cluster
if(p.x - 1 >= rect_lt.x && !face_flag[p.x - 1][p.y]
&& istgt(clusters.at<int>(p.y * clusters.cols + (p.x - 1))))
tgt_pos.push(Point(p.x - 1, p.y));
if(p.x + 1 < rect_rb.x && !face_flag[p.x + 1][p.y]
&& istgt(clusters.at<int>(p.y * clusters.cols + (p.x + 1))))
tgt_pos.push(Point(p.x + 1, p.y));
if(p.y - 1 >= rect_lt.y && !face_flag[p.x][p.y - 1]
&& istgt(clusters.at<int>((p.y - 1) * clusters.cols + p.x)))
tgt_pos.push(Point(p.x, p.y - 1));
if(p.y + 1 < rect_rb.y && !face_flag[p.x][p.y + 1]
&& istgt(clusters.at<int>((p.y + 1) * clusters.cols + p.x)))
tgt_pos.push(Point(p.x, p.y + 1));
}
// Write the final region size back into every visited cell
while(!patch_pos.empty()) {
Point p = patch_pos.top();
patch_pos.pop();
face_flag[p.x][p.y] = t;
}
return t;
}
// Find the largest connected area, inside [rect_lt, rect_rb), made of cells
// whose cluster id satisfies `istgt`, and fill the corresponding cells of
// `face` with `flag`. Returns the area size (0 when nothing matched).
int findBiggestArea(const Mat &clusters, vector<vector<int>> &face, int flag,
Point rect_lt, Point rect_rb, function<int(int)> istgt) {
int s = 0, t = 0;
vector<vector<int>> face_flag;
face_flag.resize(clusters.cols);
for(int i = 0; i < clusters.cols; i++) {
face_flag[i].resize(clusters.rows);
for(int j = 0; j < clusters.rows; j++) {
face_flag[i][j] = 0;
if(face[i][j])
face_flag[i][j] = FACE_DONE; // cells already classified are excluded
}
}
// Scan every cell of the rectangle
for(int i = rect_lt.x; i < rect_rb.x; i++) {
for(int j = rect_lt.y; j < rect_rb.y; j++) {
// skip cells already visited or outside the target clusters
if(face_flag[i][j] || !istgt(clusters.at<int>(j * clusters.cols + i))) {
continue;
}
// measure the connected area starting here (marks face_flag)
t = countSameArea(clusters, face_flag, i, j, rect_lt, rect_rb, istgt);
if(t > s) s = t;
}
}
if(!s) return s;
// Fill the cells of the largest area with `flag`.
// NOTE(review): regions are labelled with their size, so two distinct
// regions sharing the maximal size are both filled, and a maximal region
// of size 1 collides with the FACE_DONE marker — confirm acceptable.
for(int i = 0; i < clusters.cols; i++) {
for(int j = 0; j < clusters.rows; j++) {
if(face_flag[i][j] == s)
face[i][j] = flag;
}
}
return s;
}
// Re-label every connected region that is completely surrounded by cells of
// the given `layers` (within [rect_lt, rect_rb)) as layer `tgt`.
// A region is a 4-connected set of cells sharing the same face value; it is
// "surrounded" when it does not touch the rectangle border and every
// neighbour outside it is either already visited or belongs to `layers`.
// BUG FIXES vs. the original:
//  - the right/down bound checks compared against rect_lt instead of
//    rect_rb, so those neighbours always failed and almost nothing was
//    ever filled;
//  - the visited grid had its two dimensions transposed (rows x cols while
//    being indexed [x][y]);
//  - the work/result stacks were not cleared between regions, so an
//    aborted region leaked its cells into the next accepted one;
//  - removed a leftover debug print ("BBB").
void fillSurroundedLayer(vector<vector<int>> &face, vector<int> layers, int tgt, Point rect_lt, Point rect_rb) {
    if(face.empty() || face[0].empty())
        return;
    // visited markers, indexed [x][y] exactly like `face`
    vector<vector<int>> seen(face.size(), vector<int>(face[0].size(), 0));
    // membership test for the surrounding layers
    auto in_layers = [&layers](int v) {
        for(size_t k = 0; k < layers.size(); k++)
            if(v == layers[k])
                return true;
        return false;
    };
    for(int i = rect_lt.x; i < rect_rb.x; i++) {
        for(int j = rect_lt.y; j < rect_rb.y; j++) {
            if(seen[i][j] || in_layers(face[i][j]))
                continue;
            // Explore the region of cells equal to face[i][j].
            const int now = face[i][j];
            bool surrounded = true;
            stack<Point> todo;      // fresh per region (fixes stale-stack bug)
            vector<Point> region;   // all cells of the current region
            todo.push(Point(i, j));
            seen[i][j] = FACE_DONE;
            while(!todo.empty()) {
                Point p = todo.top();
                todo.pop();
                region.push_back(p);
                const Point nb[4] = {Point(p.x - 1, p.y), Point(p.x + 1, p.y),
                    Point(p.x, p.y - 1), Point(p.x, p.y + 1)};
                for(int k = 0; k < 4; k++) {
                    const Point &q = nb[k];
                    if(q.x < rect_lt.x || q.x >= rect_rb.x
                        || q.y < rect_lt.y || q.y >= rect_rb.y) {
                        // region touches the rectangle border: not surrounded
                        surrounded = false;
                        continue;
                    }
                    // already-visited or layer cells are acceptable borders
                    if(seen[q.x][q.y] || in_layers(face[q.x][q.y]))
                        continue;
                    if(face[q.x][q.y] == now) {
                        seen[q.x][q.y] = FACE_DONE;
                        todo.push(q);
                    } else {
                        // neighbour with a different, non-layer value
                        surrounded = false;
                    }
                }
            }
            if(surrounded) {
                // Write the result: the whole region becomes layer `tgt`.
                for(size_t k = 0; k < region.size(); k++)
                    face[region[k].x][region[k].y] = tgt;
            }
        }
    }
}
// Euclidean distance between two BGR colors.
// Channel differences are taken as ints, matching the integer color values
// used throughout this program.
double getColorDist(Scalar c1, Scalar c2) {
    int sum = 0;
    for(int ch = 0; ch < 3; ch++) {
        int d = c1(ch) - c2(ch);
        sum += d * d;
    }
    return sqrt(sum);
}
// Component-wise average of a list of BGR colors (integer division).
// An empty list yields (0, 0, 0).
void getAvColor(std::list<std::array<int, 3>> colors, std::array<int, 3> &dst) {
    std::array<int, 3> total = {0, 0, 0};
    for(const std::array<int, 3> &c : colors) {
        total[0] += c[0];
        total[1] += c[1];
        total[2] += c[2];
    }
    if(colors.empty()) {
        dst = total; // all zeros
        return;
    }
    dst[0] = total[0] / colors.size();
    dst[1] = total[1] / colors.size();
    dst[2] = total[2] / colors.size();
}
// Median-cut color reduction.
// A box in color space: the list of colors it contains plus their count.
class ColorBox {
public:
    std::list<std::array<int, 3>> l; // colors inside this box
    int count;                       // number of colors (cached)
    ColorBox(std::list<std::array<int, 3>> a, int c) : l(a), count(c) {}
};
// Color-reduce `src` into `dst` with a median-cut style algorithm using at
// most COL_DEC palette colors. Colors are quantized to 5 bits per channel
// while the color list is built.
void medianCut(Mat &dst, Mat &src) {
    // Presence flags for the 32x32x32 quantized color cube.
    array<array<array<int, 32>, 32>, 32> color_cubes = {};
    list<array<int ,3>> colors;
    int r_max = 0, g_max = 0, b_max = 0;
    int r_min = 300, g_min = 300, b_min = 300;
    // Collect the distinct (quantized) colors and their bounding box.
    for(int i = 0; i < src.cols; i++) {
        for(int j = 0; j < src.rows; j++) {
            int idx = i * src.elemSize() + j * src.step;
            int b = src.data[idx + 0] & 0xf8;
            int g = src.data[idx + 1] & 0xf8;
            int r = src.data[idx + 2] & 0xf8;
            if(!color_cubes[b >> 3][g >> 3][r >> 3]) {
                colors.push_front({b, g, r});
                if(b > b_max) b_max = b;
                if(g > g_max) g_max = g;
                if(r > r_max) r_max = r;
                if(b < b_min) b_min = b;
                if(g < g_min) g_min = g;
                if(r < r_min) r_min = r;
                color_cubes[b >> 3][g >> 3][r >> 3] = 1;
            }
        }
    }
    // Color reduction.
    // Seed the palette with the average of all colors.
    list<array<int, 3>> tmp1, tmp2, palette;
    list<ColorBox> boxes;
    array<int, 3> ctmp;
    getAvColor(colors, ctmp);
    palette.push_front({ctmp[0], ctmp[1], ctmp[2]});
    boxes.push_front(ColorBox(colors, colors.size()));
    for(int palette_count = 1; palette_count < COL_DEC; palette_count++) {
        // Pick the longest edge of the current box (green and blue are
        // deliberately weighted down).
        int large_edge = r_max - r_min;
        int edge_mid = (r_max + r_min) / 2;
        int color_idx = 2;
        if(large_edge < (g_max - g_min) * 0.8) {
            large_edge = g_max - g_min;
            edge_mid = (g_max + g_min) / 2;
            color_idx = 1;
        }
        if(large_edge < (b_max - b_min) * 0.5) {
            large_edge = b_max - b_min;
            edge_mid = (b_max + b_min) / 2;
            color_idx = 0;
        }
        // Split the box at the midpoint of that edge.
        tmp1.clear(); tmp2.clear();
        list<array<int, 3>>::iterator itr;
        for(itr = colors.begin(); itr != colors.end(); itr++) {
            if((*itr)[color_idx] > edge_mid)
                tmp1.push_front(*itr);
            else
                tmp2.push_front(*itr);
        }
        // Replace the front box with the two halves, keeping the list
        // sorted by descending color count.
        boxes.pop_front();
        list<ColorBox>::iterator cbitr;
        for(cbitr = boxes.begin(); cbitr != boxes.end(); cbitr++) {
            if((*cbitr).count < tmp1.size())
                break;
        }
        boxes.insert(cbitr, ColorBox(tmp1, tmp1.size()));
        for(cbitr = boxes.begin(); cbitr != boxes.end(); cbitr++) {
            if((*cbitr).count < tmp2.size())
                break;
        }
        boxes.insert(cbitr, ColorBox(tmp2, tmp2.size()));
        // The front of the list is now the biggest box — it is split next.
        colors.clear();
        ColorBox front = boxes.front();
        for(itr = front.l.begin(); itr != front.l.end(); itr++)
            colors.push_front(*itr);
        // Recompute the per-channel bounds of that box.
        r_max = 0; g_max = 0; b_max = 0;
        r_min = 300; g_min = 300; b_min = 300;
        for(itr = colors.begin(); itr != colors.end(); itr++) {
            if((*itr)[0] > b_max) b_max = (*itr)[0];
            if((*itr)[1] > g_max) g_max = (*itr)[1];
            if((*itr)[2] > r_max) r_max = (*itr)[2];
            if((*itr)[0] < b_min) b_min = (*itr)[0];
            if((*itr)[1] < g_min) g_min = (*itr)[1];
            if((*itr)[2] < r_min) r_min = (*itr)[2];
        }
    }
    // Turn the remaining boxes into palette entries (average color per box).
    list<ColorBox>::iterator cbitr;
    for(cbitr = boxes.begin(); cbitr != boxes.end(); cbitr++) {
        array<int, 3> avc;
        getAvColor((*cbitr).l, avc);
        palette.push_front({avc[0], avc[1], avc[2]});
    }
    // Rewrite the image using the nearest palette color for each pixel.
    dst = Mat(src.size(), src.type());
    for(int i = 0; i < src.cols; i++) {
        for(int j = 0; j < src.rows; j++) {
            int idx = i * src.elemSize() + j * src.step;
            int b = src.data[idx + 0];
            int g = src.data[idx + 1];
            int r = src.data[idx + 2];
            // BUG FIX: the running minimum must be a double; the original
            // stored the double distance in an int, which truncated it and
            // could keep a worse palette color on near ties.
            double min_dist = numeric_limits<double>::max();
            array<int, 3> use_palette = {};
            list<array<int, 3>>::iterator itr;
            for(itr = palette.begin(); itr != palette.end(); itr++) {
                Scalar c = Scalar((*itr)[0], (*itr)[1], (*itr)[2]);
                double dist = getColorDist(Scalar(b, g, r), c);
                if(dist < min_dist) {
                    min_dist = dist;
                    use_palette = *itr;
                }
            }
            dst.data[idx + 0] = use_palette[0];
            dst.data[idx + 1] = use_palette[1];
            dst.data[idx + 2] = use_palette[2];
        }
    }
}
// Clustering.
// Runs k-means with CLUSTER_COUNT clusters over the pixels of `img`.
// Outputs: `clusters` — an img.rows x img.cols matrix of int cluster ids;
// `centers` — one 3-channel 8-bit color per cluster center.
void clustering(Mat &clusters, Mat &centers, Mat &img) {
int img_size = img.cols * img.rows;
// Reshape to a single-channel matrix with one pixel sample per row
Mat samples = img.reshape(1, img_size);
samples.convertTo(samples, CV_32FC1, 1.0 / 255);
// Clustering
kmeans(samples, CLUSTER_COUNT, clusters,
TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
1, KMEANS_PP_CENTERS, centers);
// Back to 8 bit with 3 channels per center
centers.convertTo(centers, CV_8UC1, 255.0);
centers = centers.reshape(3);
// Back to a 2D (img.rows x img.cols) matrix of cluster ids
clusters = clusters.reshape(1, img.rows);
samples.release();
}
// Return the index of the k-means cluster whose center color is closest to
// the reference skin color (SKIN_B, SKIN_G, SKIN_R).
int getSkinCluster(Mat &centers) {
    const Scalar skin(SKIN_B, SKIN_G, SKIN_R);
    double best = numeric_limits<double>::max();
    int best_idx = 0;
    for(int c = 0; c < CLUSTER_COUNT; c++) {
        // centers holds one 3-byte BGR color per cluster
        Scalar center((int)centers.data[c * 3 + 0],
            (int)centers.data[c * 3 + 1],
            (int)centers.data[c * 3 + 2]);
        double d = getColorDist(center, skin);
        if(d < best) {
            best = d;
            best_idx = c;
        }
    }
    return best_idx;
}
// Reset `face` to a clusters.cols x clusters.rows grid of zeros
// (one cell per pixel, indexed [x][y]).
void initFaceVector(vector<vector<int>> &face, Mat &clusters) {
    face.assign(clusters.cols, vector<int>(clusters.rows, 0));
}
// Compute the bounding rectangle [skin_lt, skin_rb] of all FACE_SKIN cells.
// When no skin cell exists, skin_lt stays at (cols, rows) and skin_rb at
// (0, 0).
void getSkinRect(vector<vector<int>> &face, Mat &clusters, Point &skin_lt, Point &skin_rb) {
    skin_lt = Point(clusters.cols, clusters.rows);
    skin_rb = Point(0, 0);
    for(int x = 0; x < clusters.cols; x++) {
        for(int y = 0; y < clusters.rows; y++) {
            if(face[x][y] != FACE_SKIN)
                continue;
            if(x < skin_lt.x) skin_lt.x = x;
            if(y < skin_lt.y) skin_lt.y = y;
            if(x > skin_rb.x) skin_rb.x = x;
            if(y > skin_rb.y) skin_rb.y = y;
        }
    }
    // Keep the top edge strictly below row 0 (callers scan rows
    // 0 .. skin_lt.y - 1 above the rectangle when looking for hair).
    if(skin_lt.y == 0)
        skin_lt.y = 1;
}
// Correct the face tilt.
// Estimates the tilt from the face outline inside the bottom 1/FACE_SLP of
// the skin bounding rectangle, and when the combined angle is large enough
// writes a rotated copy of src_img (around rot_o) into img2.
// Returns 1 when no rotation was needed (img2 = clone of src_img),
// 0 when img2 holds the rotated image and detection must be redone.
int correctFaceTilt(vector<vector<int>> &face, Point &skin_lt, Point &skin_rb, Mat &img, Mat &src_img, Mat &img2, Point rot_o) {
// Estimate the tilt: average slope of the outline inside the lower
// 1/FACE_SLP strip of the skin bounding rectangle
constexpr double hlfpi = M_PI / 2;
int face_slp_top = skin_rb.y - (skin_rb.y - skin_lt.y) / FACE_SLP;
// NOTE(review): these stay uninitialized when a scanned row contains no
// FACE_SKIN cell, and the slope divisions below divide by zero when the
// two x positions coincide — confirm the inputs rule both out.
int face_slp_lb, face_slp_lt, face_slp_rb, face_slp_rt;
double face_slp_l, face_slp_r;
// Left side: leftmost skin cell of the bottom row and of the strip top row
for(int i = skin_lt.x; i <= skin_rb.x; i++) {
if(face[i][skin_rb.y] == FACE_SKIN) {
face_slp_lb = i;
break;
}
}
for(int i = skin_lt.x; i <= skin_rb.x; i++) {
if(face[i][face_slp_top] == FACE_SKIN) {
face_slp_lt = i;
break;
}
}
face_slp_l = (double)(skin_rb.y - face_slp_top ) / (double)(face_slp_lt - face_slp_lb);
// Right side: rightmost skin cell of the same two rows
for(int i = skin_rb.x; i >= skin_lt.x; i--) {
if(face[i][skin_rb.y] == FACE_SKIN) {
face_slp_rb = i;
break;
}
}
for(int i = skin_rb.x; i >= skin_lt.x; i--) {
if(face[i][face_slp_top] == FACE_SKIN) {
face_slp_rt = i;
break;
}
}
face_slp_r = (double)(skin_rb.y - face_slp_top ) / (double)(face_slp_rt - face_slp_rb);
cerr << "kl " << face_slp_l << endl;
cerr << "kr " << face_slp_r << endl;
// Convert the two slopes to angles and add them.
// NOTE(review): the hlfpi folding plus the extra "* M_PI / 180.0" factor
// mixes radians and degrees — verify the intended formula.
int face_hlfpi_l = face_slp_l / hlfpi;
int face_hlfpi_r = face_slp_r / hlfpi;
double face_th_l = atan(face_slp_l - face_hlfpi_l * hlfpi) + (face_hlfpi_l * hlfpi * M_PI / 180.0);
double face_th_r = atan(face_slp_r - face_hlfpi_r * hlfpi) + (face_hlfpi_r * hlfpi * M_PI / 180.0);
double rotth = face_th_l + face_th_r;
cerr << "tl " << face_th_l << endl;
cerr << "tr " << face_th_r << endl;
// Below roughly 8 degrees the tilt is ignored
if(fabs(rotth) < 8.0 * M_PI / 180) {
cerr << "Nothing to do" << endl;
img2 = src_img.clone();
return 1;
}
// Build the affine rotation matrix around rot_o
Mat rotmat(2, 3, CV_32FC1);
rotmat.at<float>(0, 0) = cos(rotth); rotmat.at<float>(0, 1) = -sin(rotth);
rotmat.at<float>(1, 0) = sin(rotth); rotmat.at<float>(1, 1) = cos(rotth);
double new_x = rot_o.x * cos(rotth) - rot_o.y * sin(rotth);
double new_y = rot_o.x * sin(rotth) + rot_o.y * cos(rotth);
rotmat.at<float>(0, 2) = rot_o.x - new_x;
rotmat.at<float>(1, 2) = rot_o.y - new_y;
// Rotate
img2 = Mat(src_img.size(), src_img.type());
warpAffine(src_img, img2, rotmat, src_img.size());
return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment