Create a gist now

Instantly share code, notes, and snippets.

@GilLevi /README.md
Last active Feb 23, 2017

Age and Gender Classification using Convolutional Neural Networks

Age and Gender Classification using Convolutional Neural Networks

name: Age Classification CNN

caffemodel: age_net.caffemodel

caffemodel_url: https://dl.dropboxusercontent.com/u/38822310/age_net.caffemodel

name: Gender Classification CNN

caffemodel: gender_net.caffemodel

caffemodel_url: https://dl.dropboxusercontent.com/u/38822310/gender_net.caffemodel

mean_file_proto: https://dl.dropboxusercontent.com/u/38822310/mean.binaryproto

gist_id: c9e99062283c719c03de

Description

Convolutional neural networks for age and gender classification as described in the following work:

Gil Levi and Tal Hassner, Age and Gender Classification Using Convolutional Neural Networks, IEEE Workshop on Analysis and Modeling of Faces and Gestures (AMFG), at the IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), Boston, June 2015

Project page: http://www.openu.ac.il/home/hassner/projects/cnn_agegender/

If you find our models useful, please add suitable reference to our paper in your work.


Copyright 2015, Gil Levi and Tal Hassner

The SOFTWARE provided in this page is provided "as is", without any guarantee made as to its suitability or fitness for any particular use. It may contain bugs, so use of this tool is at your own risk. We take no responsibility for any damage of any sort that may unintentionally be caused through its use.

# Caffe SGD solver for training the age-classification CNN.
# NOTE(review): all file paths are machine-specific ("/home/ubuntu/...") —
# adjust to your own layout before running.
net: "/home/ubuntu/AdienceFaces/age/train_val.prototxt"
test_iter: 1000          # test batches per evaluation pass
test_interval: 1000      # run the TEST phase every 1000 training iterations
base_lr: 0.001           # initial learning rate
lr_policy: "step"        # multiply LR by `gamma` every `stepsize` iterations
gamma: 0.1
stepsize: 10000
display: 20              # log training status every 20 iterations
max_iter: 50000          # total training iterations
momentum: 0.9
weight_decay: 0.0005     # global L2 regularization strength
snapshot: 1000           # write model/solver snapshots every 1000 iterations
snapshot_prefix: "caffenet_train"
solver_mode: GPU
# Train/validation network for age classification (fc8 has 8 outputs, one per
# Adience age group). Written in deprecated Caffe V1 syntax (`layers`, enum
# layer types, `blobs_lr`); modern Caffe upgrades this automatically on load.
# In V1, the repeated blobs_lr / weight_decay values are per-blob multipliers:
# first entry applies to the weights, second to the biases.
name: "CaffeNet"
# TRAIN-phase input: LMDB of face images, cropped to 227x227, mean-subtracted,
# with horizontal mirroring for augmentation.
layers {
name: "data"
type: DATA
top: "data"
top: "label"
data_param {
source: "/home/ubuntu/AdienceFaces/lmdb/age_train_lmdb"
backend: LMDB
batch_size: 50
}
transform_param {
crop_size: 227
mean_file: "/home/ubuntu/AdienceFaces/mean_image/mean.binaryproto"
mirror: true
}
include: { phase: TRAIN }
}
# TEST-phase input: validation LMDB, same preprocessing but no mirroring.
layers {
name: "data"
type: DATA
top: "data"
top: "label"
data_param {
source: "/home/ubuntu/AdienceFaces/lmdb/age_val_lmdb"
backend: LMDB
batch_size: 50
}
transform_param {
crop_size: 227
mean_file: "/home/ubuntu/AdienceFaces/mean_image/mean.binaryproto"
mirror: false
}
include: { phase: TEST }
}
# conv1: 96 filters, 7x7, stride 4.
layers {
name: "conv1"
type: CONVOLUTION
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 96
kernel_size: 7
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "relu1"
type: RELU
bottom: "conv1"
top: "conv1"
}
# pool1: 3x3 max pooling, stride 2.
layers {
name: "pool1"
type: POOLING
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# norm1: local response normalization (AlexNet-style parameters).
layers {
name: "norm1"
type: LRN
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv2: 256 filters, 5x5, pad 2 (spatial size preserved).
layers {
name: "conv2"
type: CONVOLUTION
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu2"
type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
name: "pool2"
type: POOLING
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm2"
type: LRN
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv3: 384 filters, 3x3, pad 1.
layers {
name: "conv3"
type: CONVOLUTION
bottom: "norm2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers{
name: "relu3"
type: RELU
bottom: "conv3"
top: "conv3"
}
# Named "pool5" (not "pool3") to mirror CaffeNet layer naming.
layers {
name: "pool5"
type: POOLING
bottom: "conv3"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# fc6: first fully-connected layer, 512 units (smaller than CaffeNet's 4096).
layers {
name: "fc6"
type: INNER_PRODUCT
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 512
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu6"
type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
name: "drop6"
type: DROPOUT
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "fc7"
type: INNER_PRODUCT
bottom: "fc6"
top: "fc7"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 512
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu7"
type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
name: "drop7"
type: DROPOUT
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
# fc8: classifier layer — 8 age classes; trained with 10x/20x LR multipliers
# (common practice for a freshly-initialized output layer).
layers {
name: "fc8"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8"
blobs_lr: 10
blobs_lr: 20
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 8
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Accuracy is reported only during the TEST phase.
layers {
name: "accuracy"
type: ACCURACY
bottom: "fc8"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
}
layers {
name: "loss"
type: SOFTMAX_LOSS
bottom: "fc8"
bottom: "label"
top: "loss"
}
# Deployment (inference) network for age classification. Same topology as the
# training net, but the LMDB data layers are replaced by a declared single-image
# input, the fillers/LR multipliers are dropped (weights come from the
# .caffemodel), and SOFTMAX_LOSS is replaced by a SOFTMAX "prob" output.
name: "CaffeNet"
input: "data"
input_dim: 1    # batch size
input_dim: 3    # channels
input_dim: 227  # height
input_dim: 227  # width
# conv1: 96 filters, 7x7, stride 4.
layers {
name: "conv1"
type: CONVOLUTION
bottom: "data"
top: "conv1"
convolution_param {
num_output: 96
kernel_size: 7
stride: 4
}
}
layers {
name: "relu1"
type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
name: "pool1"
type: POOLING
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm1"
type: LRN
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv2: 256 filters, 5x5, pad 2.
layers {
name: "conv2"
type: CONVOLUTION
bottom: "norm1"
top: "conv2"
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
}
}
layers {
name: "relu2"
type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
name: "pool2"
type: POOLING
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm2"
type: LRN
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv3: 384 filters, 3x3, pad 1.
layers {
name: "conv3"
type: CONVOLUTION
bottom: "norm2"
top: "conv3"
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
}
}
layers{
name: "relu3"
type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
name: "pool5"
type: POOLING
bottom: "conv3"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# Fully-connected head: 512 -> 512 -> 8.
layers {
name: "fc6"
type: INNER_PRODUCT
bottom: "pool5"
top: "fc6"
inner_product_param {
num_output: 512
}
}
layers {
name: "relu6"
type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
name: "drop6"
type: DROPOUT
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "fc7"
type: INNER_PRODUCT
bottom: "fc6"
top: "fc7"
inner_product_param {
num_output: 512
}
}
layers {
name: "relu7"
type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
name: "drop7"
type: DROPOUT
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
# fc8: 8 age-class scores.
layers {
name: "fc8"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8"
inner_product_param {
num_output: 8
}
}
# "prob": softmax over the 8 class scores.
layers {
name: "prob"
type: SOFTMAX
bottom: "fc8"
top: "prob"
}
# Deployment (inference) network for GENDER classification. Identical to the
# age deploy net except fc8 outputs 2 classes instead of 8.
name: "CaffeNet"
input: "data"
input_dim: 1    # batch size
input_dim: 3    # channels
input_dim: 227  # height
input_dim: 227  # width
# conv1: 96 filters, 7x7, stride 4.
layers {
name: "conv1"
type: CONVOLUTION
bottom: "data"
top: "conv1"
convolution_param {
num_output: 96
kernel_size: 7
stride: 4
}
}
layers {
name: "relu1"
type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
name: "pool1"
type: POOLING
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm1"
type: LRN
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv2: 256 filters, 5x5, pad 2.
layers {
name: "conv2"
type: CONVOLUTION
bottom: "norm1"
top: "conv2"
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
}
}
layers {
name: "relu2"
type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
name: "pool2"
type: POOLING
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm2"
type: LRN
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv3: 384 filters, 3x3, pad 1.
layers {
name: "conv3"
type: CONVOLUTION
bottom: "norm2"
top: "conv3"
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
}
}
layers{
name: "relu3"
type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
name: "pool5"
type: POOLING
bottom: "conv3"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# Fully-connected head: 512 -> 512 -> 2.
layers {
name: "fc6"
type: INNER_PRODUCT
bottom: "pool5"
top: "fc6"
inner_product_param {
num_output: 512
}
}
layers {
name: "relu6"
type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
name: "drop6"
type: DROPOUT
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "fc7"
type: INNER_PRODUCT
bottom: "fc6"
top: "fc7"
inner_product_param {
num_output: 512
}
}
layers {
name: "relu7"
type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
name: "drop7"
type: DROPOUT
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
# fc8: 2 gender-class scores.
layers {
name: "fc8"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8"
inner_product_param {
num_output: 2
}
}
# "prob": softmax over the 2 class scores.
layers {
name: "prob"
type: SOFTMAX
bottom: "fc8"
top: "prob"
}
# Caffe SGD solver for training the gender-classification CNN.
# Identical hyperparameters to the age solver; only the net path differs.
# NOTE(review): paths are machine-specific — adjust before running.
net: "/home/ubuntu/AdienceFaces/gender/train_val.prototxt"
test_iter: 1000          # test batches per evaluation pass
test_interval: 1000      # run the TEST phase every 1000 training iterations
base_lr: 0.001           # initial learning rate
lr_policy: "step"        # multiply LR by `gamma` every `stepsize` iterations
gamma: 0.1
stepsize: 10000
display: 20              # log training status every 20 iterations
max_iter: 50000          # total training iterations
momentum: 0.9
weight_decay: 0.0005     # global L2 regularization strength
snapshot: 1000           # write model/solver snapshots every 1000 iterations
snapshot_prefix: "caffenet_train"
solver_mode: GPU
# Train/validation network for GENDER classification. Identical to the age
# train_val net except for the LMDB sources and fc8's 2 output classes.
# Deprecated Caffe V1 syntax (`layers`, enum types, `blobs_lr`); the repeated
# blobs_lr / weight_decay values are per-blob (weight, bias) multipliers.
name: "CaffeNet"
# TRAIN-phase input: LMDB of face images, cropped to 227x227, mean-subtracted,
# with horizontal mirroring for augmentation.
layers {
name: "data"
type: DATA
top: "data"
top: "label"
data_param {
source: "/home/ubuntu/AdienceFaces/lmdb/gender_train_lmdb"
backend: LMDB
batch_size: 50
}
transform_param {
crop_size: 227
mean_file: "/home/ubuntu/AdienceFaces/mean_image/mean.binaryproto"
mirror: true
}
include: { phase: TRAIN }
}
# TEST-phase input: validation LMDB, same preprocessing but no mirroring.
layers {
name: "data"
type: DATA
top: "data"
top: "label"
data_param {
source: "/home/ubuntu/AdienceFaces/lmdb/gender_val_lmdb"
backend: LMDB
batch_size: 50
}
transform_param {
crop_size: 227
mean_file: "/home/ubuntu/AdienceFaces/mean_image/mean.binaryproto"
mirror: false
}
include: { phase: TEST }
}
# conv1: 96 filters, 7x7, stride 4.
layers {
name: "conv1"
type: CONVOLUTION
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 96
kernel_size: 7
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "relu1"
type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
name: "pool1"
type: POOLING
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm1"
type: LRN
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv2: 256 filters, 5x5, pad 2.
layers {
name: "conv2"
type: CONVOLUTION
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu2"
type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
name: "pool2"
type: POOLING
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm2"
type: LRN
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
# conv3: 384 filters, 3x3, pad 1.
layers {
name: "conv3"
type: CONVOLUTION
bottom: "norm2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers{
name: "relu3"
type: RELU
bottom: "conv3"
top: "conv3"
}
# Named "pool5" (not "pool3") to mirror CaffeNet layer naming.
layers {
name: "pool5"
type: POOLING
bottom: "conv3"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# fc6: first fully-connected layer, 512 units.
layers {
name: "fc6"
type: INNER_PRODUCT
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 512
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu6"
type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
name: "drop6"
type: DROPOUT
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "fc7"
type: INNER_PRODUCT
bottom: "fc6"
top: "fc7"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 512
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu7"
type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
name: "drop7"
type: DROPOUT
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
# fc8: classifier layer — 2 gender classes; 10x/20x LR multipliers for the
# freshly-initialized output layer.
layers {
name: "fc8"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8"
blobs_lr: 10
blobs_lr: 20
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Accuracy is reported only during the TEST phase.
layers {
name: "accuracy"
type: ACCURACY
bottom: "fc8"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
}
layers {
name: "loss"
type: SOFTMAX_LOSS
bottom: "fc8"
bottom: "label"
top: "loss"
}
@UttamDwivedi

Hello,
Can you tell me how you created age_net.caffemodel? I am a beginner, but I have to create a caffemodel for my project. Please guide me — it would be a great help.

@darshanime

I am getting the following error when I try to run the example just like in the ipynb :
I just saved the ipynb code to script.py

~/Downloads/cnn_age_gender_models_and_data.0.0.2$ python script.py 
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0821 17:27:08.182063 12161 upgrade_proto.cpp:618] Attempting to upgrade input file specified using deprecated V1LayerParameter: ./deploy_age.prototxt
I0821 17:27:08.182171 12161 upgrade_proto.cpp:626] Successfully upgraded file specified using deprecated V1LayerParameter
I0821 17:27:08.182332 12161 net.cpp:50] Initializing net from parameters: 
name: "CaffeNet"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 227
input_dim: 227
state {
  phase: TEST
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 96
    kernel_size: 7
    stride: 4
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "norm1"
  top: "conv2"
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv3"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  inner_product_param {
    num_output: 512
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  inner_product_param {
    num_output: 512
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc8"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8"
  inner_product_param {
    num_output: 8
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "fc8"
  top: "prob"
}
I0821 17:27:08.182857 12161 net.cpp:434] Input 0 -> data
I0821 17:27:08.182906 12161 layer_factory.hpp:75] Creating layer conv1
I0821 17:27:08.182929 12161 net.cpp:110] Creating Layer conv1
I0821 17:27:08.182940 12161 net.cpp:476] conv1 <- data
I0821 17:27:08.182952 12161 net.cpp:432] conv1 -> conv1
I0821 17:27:08.183020 12161 net.cpp:155] Setting up conv1
I0821 17:27:08.183054 12161 net.cpp:163] Top shape: 1 96 56 56 (301056)
I0821 17:27:08.183073 12161 layer_factory.hpp:75] Creating layer relu1
I0821 17:27:08.183087 12161 net.cpp:110] Creating Layer relu1
I0821 17:27:08.183097 12161 net.cpp:476] relu1 <- conv1
I0821 17:27:08.183107 12161 net.cpp:419] relu1 -> conv1 (in-place)
I0821 17:27:08.183122 12161 net.cpp:155] Setting up relu1
I0821 17:27:08.183140 12161 net.cpp:163] Top shape: 1 96 56 56 (301056)
I0821 17:27:08.183148 12161 layer_factory.hpp:75] Creating layer pool1
I0821 17:27:08.183161 12161 net.cpp:110] Creating Layer pool1
I0821 17:27:08.183171 12161 net.cpp:476] pool1 <- conv1
I0821 17:27:08.183181 12161 net.cpp:432] pool1 -> pool1
I0821 17:27:08.183199 12161 net.cpp:155] Setting up pool1
I0821 17:27:08.183212 12161 net.cpp:163] Top shape: 1 96 28 28 (75264)
I0821 17:27:08.183221 12161 layer_factory.hpp:75] Creating layer norm1
I0821 17:27:08.183234 12161 net.cpp:110] Creating Layer norm1
I0821 17:27:08.183244 12161 net.cpp:476] norm1 <- pool1
I0821 17:27:08.183254 12161 net.cpp:432] norm1 -> norm1
I0821 17:27:08.183282 12161 net.cpp:155] Setting up norm1
I0821 17:27:08.183292 12161 net.cpp:163] Top shape: 1 96 28 28 (75264)
I0821 17:27:08.183301 12161 layer_factory.hpp:75] Creating layer conv2
I0821 17:27:08.183311 12161 net.cpp:110] Creating Layer conv2
I0821 17:27:08.183320 12161 net.cpp:476] conv2 <- norm1
I0821 17:27:08.183331 12161 net.cpp:432] conv2 -> conv2
I0821 17:27:08.184334 12161 net.cpp:155] Setting up conv2
I0821 17:27:08.184376 12161 net.cpp:163] Top shape: 1 256 28 28 (200704)
I0821 17:27:08.184394 12161 layer_factory.hpp:75] Creating layer relu2
I0821 17:27:08.184408 12161 net.cpp:110] Creating Layer relu2
I0821 17:27:08.184419 12161 net.cpp:476] relu2 <- conv2
I0821 17:27:08.184430 12161 net.cpp:419] relu2 -> conv2 (in-place)
I0821 17:27:08.184445 12161 net.cpp:155] Setting up relu2
I0821 17:27:08.184456 12161 net.cpp:163] Top shape: 1 256 28 28 (200704)
I0821 17:27:08.184466 12161 layer_factory.hpp:75] Creating layer pool2
I0821 17:27:08.184478 12161 net.cpp:110] Creating Layer pool2
I0821 17:27:08.184487 12161 net.cpp:476] pool2 <- conv2
I0821 17:27:08.184497 12161 net.cpp:432] pool2 -> pool2
I0821 17:27:08.184514 12161 net.cpp:155] Setting up pool2
I0821 17:27:08.184525 12161 net.cpp:163] Top shape: 1 256 14 14 (50176)
I0821 17:27:08.184533 12161 layer_factory.hpp:75] Creating layer norm2
I0821 17:27:08.184546 12161 net.cpp:110] Creating Layer norm2
I0821 17:27:08.184556 12161 net.cpp:476] norm2 <- pool2
I0821 17:27:08.184566 12161 net.cpp:432] norm2 -> norm2
I0821 17:27:08.184578 12161 net.cpp:155] Setting up norm2
I0821 17:27:08.184589 12161 net.cpp:163] Top shape: 1 256 14 14 (50176)
I0821 17:27:08.184598 12161 layer_factory.hpp:75] Creating layer conv3
I0821 17:27:08.184612 12161 net.cpp:110] Creating Layer conv3
I0821 17:27:08.184619 12161 net.cpp:476] conv3 <- norm2
I0821 17:27:08.184630 12161 net.cpp:432] conv3 -> conv3
I0821 17:27:08.186161 12161 net.cpp:155] Setting up conv3
I0821 17:27:08.186200 12161 net.cpp:163] Top shape: 1 384 14 14 (75264)
I0821 17:27:08.186221 12161 layer_factory.hpp:75] Creating layer relu3
I0821 17:27:08.186238 12161 net.cpp:110] Creating Layer relu3
I0821 17:27:08.186247 12161 net.cpp:476] relu3 <- conv3
I0821 17:27:08.186259 12161 net.cpp:419] relu3 -> conv3 (in-place)
I0821 17:27:08.186275 12161 net.cpp:155] Setting up relu3
I0821 17:27:08.186285 12161 net.cpp:163] Top shape: 1 384 14 14 (75264)
I0821 17:27:08.186293 12161 layer_factory.hpp:75] Creating layer pool5
I0821 17:27:08.186305 12161 net.cpp:110] Creating Layer pool5
I0821 17:27:08.186314 12161 net.cpp:476] pool5 <- conv3
I0821 17:27:08.186324 12161 net.cpp:432] pool5 -> pool5
I0821 17:27:08.186342 12161 net.cpp:155] Setting up pool5
I0821 17:27:08.186353 12161 net.cpp:163] Top shape: 1 384 7 7 (18816)
I0821 17:27:08.186362 12161 layer_factory.hpp:75] Creating layer fc6
I0821 17:27:08.186373 12161 net.cpp:110] Creating Layer fc6
I0821 17:27:08.186383 12161 net.cpp:476] fc6 <- pool5
I0821 17:27:08.186393 12161 net.cpp:432] fc6 -> fc6
I0821 17:27:08.204594 12161 net.cpp:155] Setting up fc6
I0821 17:27:08.204668 12161 net.cpp:163] Top shape: 1 512 (512)
I0821 17:27:08.204706 12161 layer_factory.hpp:75] Creating layer relu6
I0821 17:27:08.204727 12161 net.cpp:110] Creating Layer relu6
I0821 17:27:08.204741 12161 net.cpp:476] relu6 <- fc6
I0821 17:27:08.204757 12161 net.cpp:419] relu6 -> fc6 (in-place)
I0821 17:27:08.204782 12161 net.cpp:155] Setting up relu6
I0821 17:27:08.204805 12161 net.cpp:163] Top shape: 1 512 (512)
I0821 17:27:08.204823 12161 layer_factory.hpp:75] Creating layer drop6
I0821 17:27:08.204843 12161 net.cpp:110] Creating Layer drop6
I0821 17:27:08.204854 12161 net.cpp:476] drop6 <- fc6
I0821 17:27:08.204864 12161 net.cpp:419] drop6 -> fc6 (in-place)
I0821 17:27:08.204881 12161 net.cpp:155] Setting up drop6
I0821 17:27:08.204892 12161 net.cpp:163] Top shape: 1 512 (512)
I0821 17:27:08.204901 12161 layer_factory.hpp:75] Creating layer fc7
I0821 17:27:08.204913 12161 net.cpp:110] Creating Layer fc7
I0821 17:27:08.204928 12161 net.cpp:476] fc7 <- fc6
I0821 17:27:08.204939 12161 net.cpp:432] fc7 -> fc7
I0821 17:27:08.205255 12161 net.cpp:155] Setting up fc7
I0821 17:27:08.205272 12161 net.cpp:163] Top shape: 1 512 (512)
I0821 17:27:08.205298 12161 layer_factory.hpp:75] Creating layer relu7
I0821 17:27:08.205312 12161 net.cpp:110] Creating Layer relu7
I0821 17:27:08.205322 12161 net.cpp:476] relu7 <- fc7
I0821 17:27:08.205332 12161 net.cpp:419] relu7 -> fc7 (in-place)
I0821 17:27:08.205344 12161 net.cpp:155] Setting up relu7
I0821 17:27:08.205354 12161 net.cpp:163] Top shape: 1 512 (512)
I0821 17:27:08.205363 12161 layer_factory.hpp:75] Creating layer drop7
I0821 17:27:08.205376 12161 net.cpp:110] Creating Layer drop7
I0821 17:27:08.205386 12161 net.cpp:476] drop7 <- fc7
I0821 17:27:08.205396 12161 net.cpp:419] drop7 -> fc7 (in-place)
I0821 17:27:08.205412 12161 net.cpp:155] Setting up drop7
I0821 17:27:08.205422 12161 net.cpp:163] Top shape: 1 512 (512)
I0821 17:27:08.205431 12161 layer_factory.hpp:75] Creating layer fc8
I0821 17:27:08.205443 12161 net.cpp:110] Creating Layer fc8
I0821 17:27:08.205452 12161 net.cpp:476] fc8 <- fc7
I0821 17:27:08.205463 12161 net.cpp:432] fc8 -> fc8
I0821 17:27:08.205490 12161 net.cpp:155] Setting up fc8
I0821 17:27:08.205503 12161 net.cpp:163] Top shape: 1 8 (8)
I0821 17:27:08.205515 12161 layer_factory.hpp:75] Creating layer prob
I0821 17:27:08.205528 12161 net.cpp:110] Creating Layer prob
I0821 17:27:08.205538 12161 net.cpp:476] prob <- fc8
I0821 17:27:08.205549 12161 net.cpp:432] prob -> prob
I0821 17:27:08.205566 12161 net.cpp:155] Setting up prob
I0821 17:27:08.205579 12161 net.cpp:163] Top shape: 1 8 (8)
I0821 17:27:08.205588 12161 net.cpp:240] prob does not need backward computation.
I0821 17:27:08.205600 12161 net.cpp:240] fc8 does not need backward computation.
I0821 17:27:08.205610 12161 net.cpp:240] drop7 does not need backward computation.
I0821 17:27:08.205617 12161 net.cpp:240] relu7 does not need backward computation.
I0821 17:27:08.205626 12161 net.cpp:240] fc7 does not need backward computation.
I0821 17:27:08.205634 12161 net.cpp:240] drop6 does not need backward computation.
I0821 17:27:08.205643 12161 net.cpp:240] relu6 does not need backward computation.
I0821 17:27:08.205652 12161 net.cpp:240] fc6 does not need backward computation.
I0821 17:27:08.205662 12161 net.cpp:240] pool5 does not need backward computation.
I0821 17:27:08.205670 12161 net.cpp:240] relu3 does not need backward computation.
I0821 17:27:08.205678 12161 net.cpp:240] conv3 does not need backward computation.
I0821 17:27:08.205688 12161 net.cpp:240] norm2 does not need backward computation.
I0821 17:27:08.205698 12161 net.cpp:240] pool2 does not need backward computation.
I0821 17:27:08.205708 12161 net.cpp:240] relu2 does not need backward computation.
I0821 17:27:08.205718 12161 net.cpp:240] conv2 does not need backward computation.
I0821 17:27:08.205740 12161 net.cpp:240] norm1 does not need backward computation.
I0821 17:27:08.205749 12161 net.cpp:240] pool1 does not need backward computation.
I0821 17:27:08.205759 12161 net.cpp:240] relu1 does not need backward computation.
I0821 17:27:08.205767 12161 net.cpp:240] conv1 does not need backward computation.
I0821 17:27:08.205776 12161 net.cpp:283] This network produces output prob
I0821 17:27:08.205793 12161 net.cpp:297] Network initialization done.
I0821 17:27:08.205801 12161 net.cpp:298] Memory required for data: 5707328
I0821 17:27:08.305044 12161 upgrade_proto.cpp:618] Attempting to upgrade input file specified using deprecated V1LayerParameter: ./age_net.caffemodel
I0821 17:27:08.352002 12161 upgrade_proto.cpp:626] Successfully upgraded file specified using deprecated V1LayerParameter
Traceback (most recent call last):
  File "script.py", line 28, in <module>
    image_dims=(256, 256))
  File "/home/radar/caffe/python/caffe/classifier.py", line 34, in __init__
    self.transformer.set_mean(in_, mean)
  File "/home/radar/caffe/python/caffe/io.py", line 255, in set_mean
    raise ValueError('Mean shape incompatible with input shape.')
ValueError: Mean shape incompatible with input shape.
@darshanime

Solved !

Had to make the following changes in 'caffe-master'/python/caffe/io.py with :

-                raise ValueError('Mean shape incompatible with input shape.')
+                print(self.inputs[in_])
+            in_shape = self.inputs[in_][1:]
+            m_min, m_max = mean.min(), mean.max()
+            normal_mean = (mean - m_min) / (m_max - m_min)
+            mean = resize_image(normal_mean.transpose((1,2,0)),
+                        in_shape[1:]).transpose((2,0,1)) * \
+                        (m_max - m_min) + m_min
+        #raise ValueError('Mean shape incompatible with input shape.')
@kli-nlpr

@darshanime How did you create age_train_lmdb — which files did you use, the aligned folder? Thank you

@berak
berak commented Sep 14, 2015

hi @Gil , any tips on making e.g. the gender classification run with opencv's new dnn module / caffe wrapper ?

@GilLevi
Owner
GilLevi commented Sep 15, 2015

@UttamDwivedi - sorry, but I've just seen your message. If it's still relevant, please mail me at gil.levi100@gmail.com and I'll try to help.

@darshanime, indeed there is a problem with a newer implementation of Caffe's io.py. We reported it in the project page: http://www.openu.ac.il/home/hassner/projects/cnn_agegender/

@berak, nice to see that CNN finally made it into OpenCV! I'll try to create a demo of our age/gender nets.

Gil.

@kli-nlpr

@Gil , which part is your training data at http://www.cslab.openu.ac.il/download/AdienceBenchmarkOfUnfilteredFacesForGenderAndAgeClassification/

I find there are some images with age None, and some images that don't have a gender label. Did you use them for training? I want to use your dataset and compare against your results.
Thank you.

@GilLevi
Owner
GilLevi commented Oct 5, 2015

@kli-nlpr , I used only the labelled images. I can send you the train/val.txt to create the lmdb for training.

Gil

@kli-nlpr
kli-nlpr commented Oct 6, 2015

Hi, @Gil, can you send the train/val.txt to me?
My email is kai.li@nlpr.ia.ac.cn
Thank you very much

@GilLevi
Owner
GilLevi commented Oct 19, 2015

@kli-nlpr, sorry for the late response.

I've added a new git repository for creating the train/val.txt files:
https://github.com/GilLevi/AgeGenderDeepLearning

@nitish11
nitish11 commented Aug 16, 2016 edited

Hi,

I am using the gender detection model in Torch and in Caffe for detection from live camera.

Running the code on CPU and keeping the same models file, I am getting different prediction times.
For Caffe, it is ~1.30 seconds per frame.
For Torch, it is ~0.45 seconds per frame.

What could be the possible reason for the time difference? Is Torch faster than Caffe?

@GilLevi
Owner
GilLevi commented Aug 25, 2016

@nitish11, like I commented in the repository , I don't have any experience in Torch, so I really can't tell which is faster.

@engesilva

Hi Levi,

Levi, I have some doubts about the network architecture used for the gender classification training:

  • The current architecture identifies the layer "type" by capital letters (e.g., CONVOLUTION). Could the training instead be conducted after replacing CONVOLUTION with "Convolution" and DATA with "Data"?
@GilLevi
Owner
GilLevi commented Oct 24, 2016

Hi @engesilva,

I think you might need to replace ALL CAPITALS with lowercase letters, depending on the version of Caffe you are using.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment