
@EncodeTS
Last active February 19, 2024 06:56

VGG-Face model for Keras

This is the Keras model of VGG-Face.

It has been obtained through the following methods:

  • vgg-face-keras: directly convert the vgg-face matconvnet model to a Keras model
  • vgg-face-keras-fc: first convert the vgg-face Caffe model to an MXNet model, then convert that to a Keras model

Details about the network architecture can be found in the following paper:

Deep Face Recognition
O. M. Parkhi, A. Vedaldi, A. Zisserman
British Machine Vision Conference, 2015

Please cite the paper if you use the models.

Contents:

model and usage demo: see vgg-face-keras.py or vgg-face-keras-fc.py

The only difference between them is the last few layers (see the code and you'll see it), but they produce the same result.

weights: 

Notice:

Please use this model in Theano mode, i.e. with channels-first image ordering.
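
If you are on the TensorFlow backend instead, the channels-first ("Theano-style") image ordering that the model expects has to be selected explicitly. A minimal sketch; the exact call depends on your Keras version (see also the comments at the bottom of this page):

from keras import backend as K

# Select channels-first image ordering before building the model.
K.set_image_data_format('channels_first')   # Keras 2.x
# K.set_image_dim_ordering('th')            # Keras 1.x equivalent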

# vgg-face-keras-fc.py
from keras.models import Model
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout
from PIL import Image
import numpy as np


def vgg_face(weights_path=None):
    img = Input(shape=(3, 224, 224))

    # Block 1
    pad1_1 = ZeroPadding2D(padding=(1, 1))(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    # Block 2
    pad2_1 = ZeroPadding2D((1, 1))(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1))(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)

    # Block 3
    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)

    # Block 4
    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    # Block 5
    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)

    # Fully connected classifier (Dense layers)
    flat = Flatten()(pool5)
    fc6 = Dense(4096, activation='relu', name='fc6')(flat)
    fc6_drop = Dropout(0.5)(fc6)
    fc7 = Dense(4096, activation='relu', name='fc7')(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    out = Dense(2622, activation='softmax', name='fc8')(fc7_drop)

    model = Model(input=img, output=out)

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":
    im = Image.open('A.J._Buckley.jpg')
    im = im.resize((224, 224))
    im = np.array(im).astype(np.float32)
    # im[:,:,0] -= 129.1863
    # im[:,:,1] -= 104.7624
    # im[:,:,2] -= 93.5940
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = vgg_face('vgg-face-keras-fc.h5')
    out = model.predict(im)
    print(out[0][0])

# vgg-face-keras.py
from keras.models import Model
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dropout, Activation
from PIL import Image
import numpy as np


def vgg_face(weights_path=None):
    img = Input(shape=(3, 224, 224))

    # Block 1
    pad1_1 = ZeroPadding2D(padding=(1, 1))(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    # Block 2
    pad2_1 = ZeroPadding2D((1, 1))(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1))(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)

    # Block 3
    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)

    # Block 4
    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    # Block 5
    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)

    # Classifier expressed as convolutions (fc layers as 7x7 / 1x1 convs)
    fc6 = Convolution2D(4096, 7, 7, activation='relu', name='fc6')(pool5)
    fc6_drop = Dropout(0.5)(fc6)
    fc7 = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    fc8 = Convolution2D(2622, 1, 1, name='fc8')(fc7_drop)
    flat = Flatten()(fc8)
    out = Activation('softmax')(flat)

    model = Model(input=img, output=out)

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":
    im = Image.open('A.J._Buckley.jpg')
    im = im.resize((224, 224))
    im = np.array(im).astype(np.float32)
    # im[:,:,0] -= 129.1863
    # im[:,:,1] -= 104.7624
    # im[:,:,2] -= 93.5940
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = vgg_face('vgg-face-keras.h5')
    out = model.predict(im)
    print(out[0][0])
@kaushik20-dev

What is the dataset used for this model?

@wen-fei

wen-fei commented Mar 14, 2018

Thanks, does anybody know of a PyTorch version?

@kristianSN

Question: The mean subtraction done in

im[:,:,0] -= 129.1863

im[:,:,1] -= 104.7624

im[:,:,2] -= 93.5940

1: Are these the channel means obtained by the people who trained VGG-Face?
2: If so, why are they commented out?

@shahryarabaki

I got this error when I ran the code:

VGG_Tensorflow_V2.py:15: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation="relu")`
  model.add(Convolution2D(64, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:17: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation="relu")`
  model.add(Convolution2D(64, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:21: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation="relu")`
  model.add(Convolution2D(128, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:23: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation="relu")`
  model.add(Convolution2D(128, 3, 3, activation='relu'))
Traceback (most recent call last):
  File "/home/mshafaei/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1576, in _create_c_op
    c_op = c_api.TF_FinishOperation(op_desc)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

@afruzan

afruzan commented Nov 24, 2018

I also got this error when I ran vgg-face-keras.py:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

@finger2vec

I also got this error when I ran vgg-face-keras.py:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

You could set the image data format to channels_first as follows:

from keras import backend as K
K.set_image_data_format('channels_first')

@finger2vec

I got this error when I ran the code:

VGG_Tensorflow_V2.py:15: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation="relu")`
  model.add(Convolution2D(64, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:17: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation="relu")`
  model.add(Convolution2D(64, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:21: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation="relu")`
  model.add(Convolution2D(128, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:23: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation="relu")`
  model.add(Convolution2D(128, 3, 3, activation='relu'))
Traceback (most recent call last):
  File "/home/mshafaei/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1576, in _create_c_op
    c_op = c_api.TF_FinishOperation(op_desc)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

You could set the image data format to channels_first as follows:

from keras import backend as K
K.set_image_data_format('channels_first')

@wgsjack199213

[test image: 00000001]
Here is a test picture; the probability of the picture belonging to the first class should be 0.99953598.

Sorry but I ran the vgg-face-keras.py program using theano backend and the maximum probability is only 0.207927 (class 36, not class 1). Did I make any mistake?

Thanks!

@afruzan

afruzan commented Feb 17, 2019

Question: The mean subtraction done in
im[:,:,0] -= 129.1863
im[:,:,1] -= 104.7624
im[:,:,2] -= 93.5940
1: Are these the channel means obtained by the people who trained the VGG face?
2: If so, why are they commented?

These are my questions too... Can anyone explain how these means are computed? Does it mean we should also subtract these means from the training images?
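
For reference: these values appear to be the average per-channel (R, G, B) pixel values published with the original VGG-Face matconvnet model, and if the model was trained with that normalization, the same subtraction should be applied to any image fed to it, training or test. A minimal preprocessing sketch with the subtraction enabled, treating that interpretation as an assumption (preprocess_face is just an illustrative helper name):

import numpy as np
from PIL import Image

def preprocess_face(path):
    # Assumes 129.1863 / 104.7624 / 93.5940 are the R / G / B channel means
    # of the VGG-Face training data (not confirmed by the gist author).
    im = Image.open(path).resize((224, 224))
    im = np.array(im).astype(np.float32)
    im[:, :, 0] -= 129.1863  # R
    im[:, :, 1] -= 104.7624  # G
    im[:, :, 2] -= 93.5940   # B
    im = im.transpose((2, 0, 1))       # HWC -> CHW (channels first)
    return np.expand_dims(im, axis=0)  # add batch dimension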

@aco92

aco92 commented Apr 4, 2019

[test image: 00000001]
Here is a test picture; the probability of the picture belonging to the first class should be 0.99953598.

Sorry but I ran the vgg-face-keras.py program using theano backend and the maximum probability is only 0.207927 (class 36, not class 1). Did I make any mistake?

Thanks!

I ran it with the TF backend and the maximum probability for that test image is 0.20376973. Does anybody know why we are not getting the expected result?

Regards,
Aleksandar

@rhlshah

rhlshah commented Jun 11, 2019

@EncodeTS,
I ran it with the TF backend but I am getting a system error: "Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2". Please help me out.

@moe-assal

@rhlshah
Don't worry about this message; any program you run will work as usual. The reason it appears is that you are using a CPU that supports AVX2, a set of instructions that can accelerate some vector operations. You could install another TensorFlow binary built with AVX2 support, but I don't recommend that since it's a hard thing to do.


@sohaibwaheedgithub

I am getting this error while using "vgg-face-keras.h5":
Negative dimension size caused by subtracting 2 from 1 for '{{node max_pooling2d_2/MaxPool}} = MaxPool[T=DT_FLOAT, data_format="NCHW", explicit_paddings=[], ksize=[1, 1, 2, 2], padding="VALID", strides=[1, 1, 2, 2]]' with input shapes: [?,256,1,1].
