
VGG-Face model for Keras

This is the Keras model of VGG-Face.

It was obtained in two ways:

  • vgg-face-keras: the VGG-Face matconvnet model converted directly to a Keras model (a rough sketch of this conversion direction follows the list)
  • vgg-face-keras-fc: the VGG-Face Caffe model converted first to an MXNet model, and then from MXNet to a Keras model
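
For context, here is a rough sketch of what the matconvnet-to-Keras direction involves. It is not the exact script used for this gist, and the .mat field names ('layers', 'name', 'weights') are assumptions based on the published vgg_face.mat layout, so check them against your copy.

import numpy as np
from scipy.io import loadmat

def copy_matconvnet_weights(mat_path, keras_model):
    # Assumes the .mat file exposes a top-level 'layers' array whose entries
    # carry 'name' and 'weights' fields, as in the published vgg_face.mat.
    net = loadmat(mat_path, struct_as_record=False, squeeze_me=True)
    for layer in net['layers']:
        name = getattr(layer, 'name', None)
        weights = getattr(layer, 'weights', None)
        if not name or weights is None or len(weights) == 0:
            continue  # pooling / ReLU / softmax layers carry no weights
        try:
            keras_layer = keras_model.get_layer(name)
        except ValueError:
            continue  # naming mismatch; skip anything that cannot be matched
        kernel, bias = weights[0], weights[1]
        # matconvnet stores conv filters as (h, w, in, out); Keras 1 with the
        # Theano ordering expects (out, in, h, w).
        if kernel.ndim == 4:
            kernel = kernel.transpose((3, 2, 0, 1))
        keras_layer.set_weights([kernel, bias.flatten()])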

Details about the network architecture can be found in the following paper:

Deep Face Recognition
O. M. Parkhi, A. Vedaldi, A. Zisserman
British Machine Vision Conference, 2015

Please cite the paper if you use the models.

Contents:

model and usage demo: see vgg-face-keras.py or vgg-face-keras-fc.py

The only difference between them is the last few layers (see the code and you'll understand), but they produce the same result. A quick comparison sketch is included after the two listings below.

weights: 

Notice:

Please use this model in Theano mode, i.e. the Theano backend with channels-first ('th') image dimension ordering.
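
For Keras 1.x there are two usual ways to set this up; a minimal sketch is below (the keras.json values shown are the stock defaults apart from the backend and the image ordering):

from keras import backend as K

# Option 1: edit ~/.keras/keras.json so Keras starts in Theano mode:
#   {"backend": "theano", "image_dim_ordering": "th", "epsilon": 1e-07, "floatx": "float32"}
# Option 2: force channels-first ordering from code (the backend itself is still
# chosen by keras.json or the KERAS_BACKEND environment variable):
K.set_image_dim_ordering('th')  # 'th' = (channels, rows, cols), matching Input(shape=(3, 224, 224))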

# vgg-face-keras-fc.py: VGG-Face with a fully connected (Dense) classifier head.
from keras.models import Model
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout
from PIL import Image
import numpy as np


def vgg_face(weights_path=None):
    img = Input(shape=(3, 224, 224))

    # Block 1
    pad1_1 = ZeroPadding2D(padding=(1, 1))(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    # Block 2
    pad2_1 = ZeroPadding2D((1, 1))(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1))(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)

    # Block 3
    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)

    # Block 4
    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    # Block 5
    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)

    # Classifier head: Dense layers on the flattened 7x7x512 feature map
    flat = Flatten()(pool5)
    fc6 = Dense(4096, activation='relu', name='fc6')(flat)
    fc6_drop = Dropout(0.5)(fc6)
    fc7 = Dense(4096, activation='relu', name='fc7')(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    out = Dense(2622, activation='softmax', name='fc8')(fc7_drop)

    model = Model(input=img, output=out)

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":
    im = Image.open('A.J._Buckley.jpg')
    im = im.resize((224, 224))
    im = np.array(im).astype(np.float32)
    # Per-channel mean subtraction used for VGG-Face; uncomment to match the
    # preprocessing used during training.
    # im[:,:,0] -= 129.1863
    # im[:,:,1] -= 104.7624
    # im[:,:,2] -= 93.5940
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = vgg_face('vgg-face-keras-fc.h5')
    out = model.predict(im)
    print(out[0][0])
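
The demo above only prints the first softmax probability. A common use of VGG-Face is to take the fc7 activation as a face descriptor instead; below is a minimal sketch built on the vgg_face() function from this listing. The second image filename ('B.jpg') and the cosine-similarity helper are illustrative, not part of the original gist.

from keras.models import Model
from PIL import Image
import numpy as np

# Cut the network at fc7 and reuse it as a 4096-d face descriptor extractor.
full_model = vgg_face('vgg-face-keras-fc.h5')
descriptor_model = Model(input=full_model.input,
                         output=full_model.get_layer('fc7').output)

def describe(path):
    # Same preprocessing as the demo above (mean subtraction omitted there too).
    im = Image.open(path).resize((224, 224))
    im = np.array(im).astype(np.float32).transpose((2, 0, 1))
    return descriptor_model.predict(np.expand_dims(im, axis=0))[0]

def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

# 'B.jpg' is a placeholder for a second face image.
print(cosine_similarity(describe('A.J._Buckley.jpg'), describe('B.jpg')))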
# vgg-face-keras.py: VGG-Face with a convolutional classifier head
# (the fc layers are implemented as 7x7 and 1x1 convolutions).
from keras.models import Model
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dropout, Activation
from PIL import Image
import numpy as np


def vgg_face(weights_path=None):
    img = Input(shape=(3, 224, 224))

    # Block 1
    pad1_1 = ZeroPadding2D(padding=(1, 1))(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    # Block 2
    pad2_1 = ZeroPadding2D((1, 1))(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1))(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)

    # Block 3
    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)

    # Block 4
    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    # Block 5
    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)

    # Classifier head: convolutions instead of Dense layers
    fc6 = Convolution2D(4096, 7, 7, activation='relu', name='fc6')(pool5)
    fc6_drop = Dropout(0.5)(fc6)
    fc7 = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    fc8 = Convolution2D(2622, 1, 1, name='fc8')(fc7_drop)
    flat = Flatten()(fc8)
    out = Activation('softmax')(flat)

    model = Model(input=img, output=out)

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":
    im = Image.open('A.J._Buckley.jpg')
    im = im.resize((224, 224))
    im = np.array(im).astype(np.float32)
    # Per-channel mean subtraction used for VGG-Face; uncomment to match the
    # preprocessing used during training.
    # im[:,:,0] -= 129.1863
    # im[:,:,1] -= 104.7624
    # im[:,:,2] -= 93.5940
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = vgg_face('vgg-face-keras.h5')
    out = model.predict(im)
    print(out[0][0])
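
To check the claim above that both variants produce the same result, one can run the same image through both heads and compare the outputs. In the sketch below, vgg_face_fc and vgg_face_conv stand for the two vgg_face() builders from the listings above, renamed so they can coexist in one script; the tolerance passed to np.allclose is illustrative.

import numpy as np
from PIL import Image

def load_image(path):
    im = Image.open(path).resize((224, 224))
    im = np.array(im).astype(np.float32).transpose((2, 0, 1))
    return np.expand_dims(im, axis=0)

x = load_image('A.J._Buckley.jpg')
out_fc = vgg_face_fc('vgg-face-keras-fc.h5').predict(x)    # Dense-head variant
out_conv = vgg_face_conv('vgg-face-keras.h5').predict(x)   # convolutional-head variant
print(np.allclose(out_fc, out_conv, atol=1e-4))            # expected: True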
@sohaibwaheedgithub commented:
I am getting this error while using "vgg-face-keras.h5":

Negative dimension size caused by subtracting 2 from 1 for '{{node max_pooling2d_2/MaxPool}} = MaxPool[T=DT_FLOAT, data_format="NCHW", explicit_paddings=[], ksize=[1, 1, 2, 2], padding="VALID", strides=[1, 1, 2, 2]]' with input shapes: [?,256,1,1].
