@EncodeTS
Last active February 19, 2024 06:56

VGG-Face model for Keras

This is the Keras model of VGG-Face.

It has been obtained through the following methods:

  • vgg-face-keras: directly convert the VGG-Face MatConvNet model to a Keras model
  • vgg-face-keras-fc: first convert the VGG-Face Caffe model to an MXNet model, then convert that to a Keras model

Details about the network architecture can be found in the following paper:

Deep Face Recognition
O. M. Parkhi, A. Vedaldi, A. Zisserman
British Machine Vision Conference, 2015

Please cite the paper if you use the models.

Contents:

model and usage demo: see vgg-face-keras.py or vgg-face-keras-fc.py

The only difference between them is the last few layers: the -fc variant implements fc6-fc8 as Dense layers after a Flatten, while the other keeps them as convolutions (a 7×7 followed by two 1×1s) and flattens at the end. They produce the same result.

weights: 

Notice:

Please use this model in Theano mode: the input shape (3, 224, 224) assumes channels-first ("th") image dim ordering.
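As an aside (not part of the original notice), a minimal sketch of forcing the channels-first ordering that the code below expects; either edit ~/.keras/keras.json or set it programmatically before building the model:

from keras import backend as K

# Keras 1.x API; under Keras 2 the equivalent call is
# K.set_image_data_format('channels_first')
K.set_image_dim_ordering('th')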

# vgg-face-keras-fc.py
from keras.models import Model
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout
from PIL import Image
import numpy as np


def vgg_face(weights_path=None):
    img = Input(shape=(3, 224, 224))

    pad1_1 = ZeroPadding2D(padding=(1, 1))(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    pad2_1 = ZeroPadding2D((1, 1))(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1))(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)

    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)

    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)

    # fully connected head: Flatten followed by Dense layers
    flat = Flatten()(pool5)
    fc6 = Dense(4096, activation='relu', name='fc6')(flat)
    fc6_drop = Dropout(0.5)(fc6)
    fc7 = Dense(4096, activation='relu', name='fc7')(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    out = Dense(2622, activation='softmax', name='fc8')(fc7_drop)

    model = Model(input=img, output=out)

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":
    im = Image.open('A.J._Buckley.jpg')
    im = im.resize((224, 224))
    im = np.array(im).astype(np.float32)
    # im[:,:,0] -= 129.1863
    # im[:,:,1] -= 104.7624
    # im[:,:,2] -= 93.5940
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = vgg_face('vgg-face-keras-fc.h5')
    out = model.predict(im)
    print(out[0][0])
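As a small aside (not in the original demo): `out` here is a (1, 2622) softmax over the training identities, so the predicted class can be read off with argmax rather than by printing a single probability:

pred = int(np.argmax(out[0]))  # index of the most probable identity
print('class %d, probability %f' % (pred, out[0][pred]))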
# vgg-face-keras.py
from keras.models import Model
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dropout, Activation
from PIL import Image
import numpy as np


def vgg_face(weights_path=None):
    img = Input(shape=(3, 224, 224))

    pad1_1 = ZeroPadding2D(padding=(1, 1))(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    pad2_1 = ZeroPadding2D((1, 1))(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1))(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)

    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)

    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)

    # fully convolutional head: fc6-fc8 as convolutions, flattened at the end
    fc6 = Convolution2D(4096, 7, 7, activation='relu', name='fc6')(pool5)
    fc6_drop = Dropout(0.5)(fc6)
    fc7 = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    fc8 = Convolution2D(2622, 1, 1, name='fc8')(fc7_drop)
    flat = Flatten()(fc8)
    out = Activation('softmax')(flat)

    model = Model(input=img, output=out)

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":
    im = Image.open('A.J._Buckley.jpg')
    im = im.resize((224, 224))
    im = np.array(im).astype(np.float32)
    # im[:,:,0] -= 129.1863
    # im[:,:,1] -= 104.7624
    # im[:,:,2] -= 93.5940
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = vgg_face('vgg-face-keras.h5')
    out = model.predict(im)
    print(out[0][0])
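A hedged usage sketch (not part of the original gist): VGG-Face is often used as a feature extractor rather than a 2622-way classifier. The fc7 activations of the model defined just above can be exposed as a face descriptor; the layer name 'fc7' comes from the definition itself:

model = vgg_face('vgg-face-keras.h5')  # the convolutional-head variant above
descriptor_model = Model(input=model.input, output=model.get_layer('fc7').output)
features = descriptor_model.predict(im)             # (1, 4096, 1, 1) in channels-first mode
features = features.reshape(features.shape[0], -1)  # flatten to a (1, 4096) descriptor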
@Patriciasr92

Patriciasr92 commented Nov 19, 2016

Hi!
I am using your model, or a version of it, because I am trying to implement fine-tuning of the last fully connected layer.

Here's the code:

'''NOTE: FOR THIS TO WORK:
nano ~/.keras/keras.json
{
"image_dim_ordering": "th",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
'''

def bottleneck():
    datagen = ImageDataGenerator(rescale=1.)
    generator = datagen.flow_from_directory(train_data_dir,
                                        target_size=(img_width, img_height),
                                        batch_size=32,
                                        class_mode=None,
                                        shuffle=False)

    pad1_1 = ZeroPadding2D(padding=(1, 1), name='in_train')(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    pad2_1 = ZeroPadding2D((1, 1), trainable=False)(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1), trainable=False)(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv2_2)

    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv3_3)

    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)
    fc6 = Convolution2D(4096, 7, 7, activation='relu', name='fc6')(pool5)
    fc6_drop = Dropout(0.5)(fc6)

    model = Model(input=img, output=fc6_drop)
    bottleneck_features_train = model.predict_generator(generator, nb_train_samples)
    np.save(open('features.npy', 'w'), bottleneck_features_train)


def entrenamos_modelo(weights_path=None):

    train_data = np.load(open('features.npy'))
    print(train_data.shape)

    train_labels = np.array(
    [0] * (nb_train_samples / 8) + [1] * (nb_train_samples / 8) + [2] * (nb_train_samples / 8) + [3] * (
        nb_train_samples / 8) + [4] * (nb_train_samples / 8) + [5] * (nb_train_samples / 8) + [6] * (
        nb_train_samples / 8) + [7] * (nb_train_samples / 8))

    lbl1 = np.array([[1, 0, 0, 0, 0, 0, 0, 0], ] * 197)
    lbl2 = np.array([[0, 1, 0, 0, 0, 0, 0, 0], ] * 197)
    lbl3 = np.array([[0, 0, 1, 0, 0, 0, 0, 0], ] * 197)
    lbl4 = np.array([[0, 0, 0, 1, 0, 0, 0, 0], ] * 197)
    lbl5 = np.array([[0, 0, 0, 0, 1, 0, 0, 0], ] * 197)
    lbl6 = np.array([[0, 0, 0, 0, 0, 1, 0, 0], ] * 197)
    lbl7 = np.array([[0, 0, 0, 0, 0, 0, 1, 0], ] * 197)
    lbl8 = np.array([[0, 0, 0, 0, 0, 0, 0, 1], ] * 197)
    label = np.concatenate([lbl1, lbl2, lbl3, lbl4, lbl5, lbl6, lbl7, lbl8])
    '''train_labels --> loss='sparse_categorical_crossentropy'
       labels --> loss='categorical_crossentropy'
    '''

    #MODEL VGG (the old model)
    pad1_1 = ZeroPadding2D(padding=(1, 1), trainable=False, input_shape=(4096, 1, 1), name='in_train')(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1', trainable=False)(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1), trainable=False)(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2', trainable=False)(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv1_2)

    pad2_1 = ZeroPadding2D((1, 1), trainable=False)(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1', trainable=False)(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1), trainable=False)(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2', trainable=False)(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv2_2)

    pad3_1 = ZeroPadding2D((1, 1), trainable=False)(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1', trainable=False)(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1), trainable=False)(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2', trainable=False)(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1), trainable=False)(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3', trainable=False)(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv3_3)

    pad4_1 = ZeroPadding2D((1, 1), trainable=False)(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1', trainable=False)(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1), trainable=False)(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2', trainable=False)(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1), trainable=False)(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3', trainable=False)(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2) , trainable=False)(conv4_3)

    pad5_1 = ZeroPadding2D((1, 1) , trainable=False)(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1', trainable=False)(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1), trainable=False)(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2', trainable=False)(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1), trainable=False)(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3', trainable=False)(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv5_3)

    fc6 = Convolution2D(4096, 7, 7, activation='relu', name='fc6', trainable=False)(pool5)
    fc6_drop = Dropout(0.5)(fc6)

    #We TRAIN this layer
    fc7 = Convolution2D(4096, 1, 1, activation='relu', name='fc7', trainable=False)(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    fc8 = Convolution2D(2622, 1, 1, name='fc8', trainable=False)(fc7_drop)
    flat = Flatten()(fc8)
    out = Activation('softmax')(flat)
    model = Model(input=img, output=out)

    #We load the weights of the old model so when we construct ours we don't have to retrain all of it.
    if weights_path:
        model.load_weights(weights_path)

    # We construct our new model: first 14 layers of the old + two new ones. The new FC has to be trained and the Softmax layer too.
    fc7_n = Convolution2D(4096, 1, 1, activation='relu', name='fc7_n', trainable=True,  input_shape=train_data.shape[1:])(fc6_drop)
    fc7_drop_n = Dropout(0.5)(fc7_n)
    fc8_n = Convolution2D(8, 1, 1, name='fc8_n', trainable=False)(fc7_drop_n)
    flat_n = Flatten(name='flat_n')(fc8_n)
    out_n = Activation('softmax')(flat_n)
    model2 = Model(input=img, output=out_n)
    #model2.summary()


    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model2.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    '''train_labels --> loss='sparse_categorical_crossentropy'
       labels --> loss='categorical_crossentropy'
    '''

    model2.fit(train_data,label, nb_epoch=nb_epoch, batch_size=64)
    print('Model Trained')

    #We save the weights so we can load them in our model
    model2.save_weights(pesos_entrenados)  # always save your weights after training or during training

    #We have two options: 1) Return the model here or in the vgg_trained_model Function
    return model2



if __name__ == "__main__":
    im = Image.open('A.J._Buckley.jpg')
    im = im.resize((224, 224))
    im = np.array(im).astype(np.float32)
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)

    # For the training stage
    img_width, img_height = 224, 224
    img = Input(shape=(3, img_height, img_width))
    train_data_dir = 'merge/train'
    pesos_entrenados='Modelo_Reentrenado.h5'
    # validation_data_dir = 'data/validation'
    nb_train_samples = 1576  # 197 per class and we have 8 classes (8 emotions)
    nb_validation_samples = 0
    nb_epoch = 20


    # Stages to construct the model
    bottleneck()   #Reduce the computational cost
    model=entrenamos_modelo('vggface_weights_tensorflow.h5')  #Construction of the model
  
    #model.summary()

    out = model.predict(im)
    print(out[0][0])


And I've got the following error:

File "freeze_2.py", line 258, in <module>
    model = entrenamos_modelo('vggface_weights_tensorflow.h5')  # Construction of the model
File "freeze_2.py", line 168, in entrenamos_modelo
    model2.fit(train_data, label, nb_epoch=nb_epoch, batch_size=64)
File "/imatge/psereno/workspace/venv-tfg/local/lib/python2.7/site-packages/keras/engine/training.py", line 1057, in fit
    batch_size=batch_size)
File "/imatge/psereno/workspace/venv-tfg/local/lib/python2.7/site-packages/keras/engine/training.py", line 984, in _standardize_user_data
    exception_prefix='model input')
File "/imatge/psereno/workspace/venv-tfg/local/lib/python2.7/site-packages/keras/engine/training.py", line 111, in standardize_input_data
    str(array.shape))
Exception: Error when checking model input: expected input_2 to have shape (None, 3, 224, 224) but got array with shape (1576, 4096, 1, 1)
srun: error: c7: task 0: Exited with exit code 1

Do you know why I get this dimension error?

Thank you very much, and sorry for the inconvenience.

PS: I am using a version of the weights adapted to TensorFlow. I did the conversion with the following code:

from keras import backend as K
import tensorflow as tf
from keras.utils.np_utils import convert_kernel  # Keras 1.x location of convert_kernel

model = Model(input=img, output=out)
weights_path = 'vgg-face-keras.h5'
model.load_weights(weights_path)
ops = []
for layer in model.layers:
    if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D', 'Convolution3D', 'AtrousConvolution2D']:
        original_w = K.get_value(layer.W)
        converted_w = convert_kernel(original_w)
        ops.append(tf.assign(layer.W, converted_w).op)
K.get_session().run(ops)
model.save_weights('vggface_weights_tensorflow.h5')

PS2: If I comment out the fit line there's no error, so I've assumed that's where the problem is.

PS3: I am using Keras (1.1.1) + Theano (0.9.0.dev4) + TensorFlow (0.10.0rc0).

@abskjha

abskjha commented Feb 10, 2017

Hi @EncodeTS,

Thanks for sharing the VGG-Face model for Keras. I was looking for a VGG-Face model, and it really helped. Can you tell me how I can convert a MatConvNet model to a Keras model? Is there a library for doing so?

Thanks.

@mzaradzki

Hi @slashstar, @EncodeTS,

To convert the weights from MatConvNet I wrote this code:
https://github.com/mzaradzki/neuralnets/blob/master/vgg_faces_keras/vgg_faces_demo.ipynb

The weights are translated using the scipy.io.loadmat function.

Hope this helps.
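For reference, a minimal sketch of the loadmat approach (the 'net'/'layers'/'weights' field names below are assumptions about the MatConvNet release and may differ in other versions; see the notebook above for the full conversion):

from scipy.io import loadmat

# struct_as_record=False / squeeze_me=True turn MATLAB structs into
# attribute-style objects, which makes the nesting easier to walk
data = loadmat('vgg_face.mat', struct_as_record=False, squeeze_me=True)
net = data['net']
for layer in net.layers:
    if layer.type == 'conv':
        # each conv layer carries its filter bank and biases
        filters, biases = layer.weights[0], layer.weights[1]
        print(layer.name, filters.shape, biases.shape)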

@oxydron

oxydron commented Apr 4, 2017

@EncodeTS Which Python, TensorFlow, and Keras versions does this model run on?

@monktastic

I'm running this with TensorFlow as the backend (but with channels_first). The test picture gives me the wrong result. Did anyone else get it working?

@kushal-vangara

@monktastic This works with TensorFlow as the backend when you change the configuration. Could you specify the error message that you are getting?

@jugatsingh

@monktastic I have the exact same issue. Were you able to solve it?

@kaushik20-dev

What is the dataset used for this model?

@wen-fei

wen-fei commented Mar 14, 2018

Thanks. Does anybody know of a PyTorch version?

@kristianSN

Question: regarding the mean subtraction done in

im[:,:,0] -= 129.1863
im[:,:,1] -= 104.7624
im[:,:,2] -= 93.5940

1: Are these the channel means obtained by the people who trained VGG-Face?
2: If so, why are they commented out?

@shahryarabaki

I got this error when I ran the code:

VGG_Tensorflow_V2.py:15: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation="relu")`
  model.add(Convolution2D(64, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:17: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation="relu")`
  model.add(Convolution2D(64, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:21: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation="relu")`
  model.add(Convolution2D(128, 3, 3, activation='relu'))
VGG_Tensorflow_V2.py:23: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation="relu")`
  model.add(Convolution2D(128, 3, 3, activation='relu'))
Traceback (most recent call last):
  File "/home/mshafaei/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1576, in _create_c_op
    c_op = c_api.TF_FinishOperation(op_desc)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

@afruzan

afruzan commented Nov 24, 2018

I also got this error when I ran vgg-face-keras.py:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

@finger2vec

I also got this error when I ran vgg-face-keras.py:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

You could set channels_first as follows:

from keras import backend as K
K.set_image_data_format('channels_first')


@wgsjack199213

[test picture: 00000001]
Here is a test picture; the probability of the picture belonging to the first class should be 0.99953598.

Sorry, but I ran the vgg-face-keras.py program using the Theano backend and the maximum probability is only 0.207927 (class 36, not class 1). Did I make any mistake?

Thanks!

@afruzan

afruzan commented Feb 17, 2019

Question: regarding the mean subtraction done in
im[:,:,0] -= 129.1863
im[:,:,1] -= 104.7624
im[:,:,2] -= 93.5940
1: Are these the channel means obtained by the people who trained VGG-Face?
2: If so, why are they commented out?

These are my questions too. Can anyone explain how these means were computed? Does it mean we should also subtract these means from the training images?

@aco92

aco92 commented Apr 4, 2019

[test picture: 00000001]
Here is a test picture; the probability of the picture belonging to the first class should be 0.99953598.

Sorry, but I ran the vgg-face-keras.py program using the Theano backend and the maximum probability is only 0.207927 (class 36, not class 1). Did I make any mistake?

Thanks!

I ran it with the TF backend and the maximum probability for that test image is 0.20376973. Does anybody know why we are not getting the expected result?

Regards,
Aleksandar

@rhlshah

rhlshah commented Jun 11, 2019

@EncodeTS,
I ran it with the TF backend but I am getting this system message: "Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2". Please help me out.

@moe-assal

@rhlshah
Don't worry about this message; any program you run will work as usual. It appears because you are using a CPU that supports AVX2, a set of instructions that can accelerate some vector operations, which this TensorFlow binary was not compiled to use. You could install another TensorFlow binary, but I don't recommend that since it's a hard thing to do.
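A hedged aside: the AVX2 notice is an informational log from TensorFlow's C++ core, and it can be silenced with an environment variable set before TensorFlow is imported:

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # '1' hides INFO logs, '2' also hides WARNING
import tensorflow as tf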

@sohaibwaheedgithub

I am getting this error while using "vgg-face-keras.h5"
Negative dimension size caused by subtracting 2 from 1 for '{{node max_pooling2d_2/MaxPool}} = MaxPool[T=DT_FLOAT, data_format="NCHW", explicit_paddings=[], ksize=[1, 1, 2, 2], padding="VALID", strides=[1, 1, 2, 2]]' with input shapes: [?,256,1,1].
