@baraldilorenzo
Created January 16, 2016 12:57
VGG-19 pre-trained model for Keras

## VGG19 model for Keras

This is the Keras model of the 19-layer network used by the VGG team in the ILSVRC-2014 competition.

It was obtained by directly converting the Caffe model provided by the authors.

Details about the network architecture can be found in the following arXiv paper:

Very Deep Convolutional Networks for Large-Scale Image Recognition
K. Simonyan, A. Zisserman
arXiv:1409.1556

In the paper, the VGG-19 model is denoted as configuration E. It achieves 7.5% top-5 error on ILSVRC-2012-val and 7.3% top-5 error on ILSVRC-2012-test.

Please cite the paper if you use the models.

### Contents:

model and usage demo: see vgg-19_keras.py

weights: vgg19_weights.h5

from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import cv2, numpy as np


def VGG_19(weights_path=None):
    # VGG-19 (configuration E): five conv blocks followed by three FC layers.
    # Expects Theano dim ordering: inputs of shape (channels, height, width).
    model = Sequential()

    # Block 1: two 3x3 conv layers with 64 filters
    model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    # Block 2: two 3x3 conv layers with 128 filters
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    # Block 3: four 3x3 conv layers with 256 filters
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    # Block 4: four 3x3 conv layers with 512 filters
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    # Block 5: four 3x3 conv layers with 512 filters
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    # Classifier: two FC layers of 4096 units and a 1000-way softmax
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model


if __name__ == "__main__":
    # Load an image, resize to 224x224 and subtract the ImageNet channel means.
    # OpenCV loads images in BGR order, matching the original Caffe preprocessing.
    im = cv2.resize(cv2.imread('cat.jpg'), (224, 224)).astype(np.float32)
    im[:,:,0] -= 103.939
    im[:,:,1] -= 116.779
    im[:,:,2] -= 123.68
    im = im.transpose((2,0,1))          # HWC -> CHW
    im = np.expand_dims(im, axis=0)     # add batch dimension

    # Test pretrained model
    model = VGG_19('vgg19_weights.h5')
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    out = model.predict(im)
    print(np.argmax(out))
@rahman-mdatiqur

Hello @baraldilorenzo
I tried to run this model with the weight file on only 10 images, but it failed with the following error message.
I would highly appreciate it if you could advise.

Traceback (most recent call last):
File "imagenet_cnn_call.py", line 59, in
vgg_19_model.compile(optimizer=sgd, loss='categorical_crossentropy')
File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 408, in compile
self.y_train = self.get_output(train=True)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/containers.py", line 128, in get_output
return self.layers[-1].get_output(train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 949, in get_output
X = self.get_input(train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 159, in get_input
previous_output = self.previous.get_output(train=train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 624, in get_output
X = self.get_input(train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 159, in get_input
previous_output = self.previous.get_output(train=train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 949, in get_output
X = self.get_input(train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 159, in get_input
previous_output = self.previous.get_output(train=train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 624, in get_output
X = self.get_input(train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 159, in get_input
previous_output = self.previous.get_output(train=train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 949, in get_output
X = self.get_input(train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 159, in get_input
previous_output = self.previous.get_output(train=train)
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 822, in get_output
return K.flatten(X)
File "/usr/local/lib/python2.7/dist-packages/keras/backend/tensorflow_backend.py", line 272, in flatten
x = tf.reshape(x, [-1, np.prod(x.get_shape()[1:].as_list())])
File "/usr/local/lib/python2.7/dist-packages/numpy/core/fromnumeric.py", line 2481, in prod
out=out, keepdims=keepdims)
File "/usr/local/lib/python2.7/dist-packages/numpy/core/_methods.py", line 35, in _prod
return umr_prod(a, axis, dtype, out, keepdims)
TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'

@baraldilorenzo (Author)

@atique81 could you post your code?

@morningsky

@baraldilorenzo Hi, I have a problem I would like your help with, thank you very much!
If I want to use this net, do I have to use an image size of 224*224 and 1000 classes?
I ask because if I don't follow this, it raises an exception.

@baraldilorenzo (Author)

@morningsky of course yes, otherwise the shape of the weights won't match.
As far as the number of classes is concerned, you could change the shape of the last layer without loading pre-trained weights, and then fine-tune on your dataset.
If you change the input size, instead, the shape of the first fully connected layer will be different, so you will have to fine-tune all the FC layers.
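
For illustration only (not code from the gist): a minimal sketch of that idea, assuming the Theano dim ordering used above, the same layer-by-layer HDF5 weight format that is used for vgg16_weights.h5 later in this thread, and hypothetical values for the new input size and number of classes.

import h5py
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D

new_size = 128    # hypothetical new input size
nb_classes = 10   # hypothetical number of classes in the new dataset

# Rebuild only the convolutional part of VGG-19 (configuration E) for the new input size.
blocks = [[64, 64], [128, 128], [256, 256, 256, 256],
          [512, 512, 512, 512], [512, 512, 512, 512]]
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, new_size, new_size)))
first = True
for block in blocks:
    for nb_filter in block:
        if not first:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(nb_filter, 3, 3, activation='relu'))
        first = False
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

# Copy the pre-trained weights into the conv layers only; the padding and
# pooling layers stored in the file have no parameters, so set_weights is a no-op there.
f = h5py.File('vgg19_weights.h5', 'r')
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        break   # stop before the FC layers stored in the file
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()

# Add freshly initialized FC layers sized for the new input and class count,
# then fine-tune them (and optionally the conv layers) on the new dataset.
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))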

@morningsky

@baraldilorenzo I am happy to get your reply. You mean I must change all the FC layers below this?
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
Can you give me a code example? Thanks!

@baraldilorenzo (Author)

@morningsky I mean that, at the very least, you should not load the weights of the FC layers, because the weight shape of the first Dense layer is going to change, and the weights learned in the other FC layers would make no sense anymore. For code, since this is a usage question, you should refer to the keras-users group.

@morningsky

@baraldilorenzo Thank you! Now I know how to use this net for my data, but I have hit an error:

File "D:\Anaconda2\lib\site-packages\keras\models.py", line 1184, in fit
X = [data[name] for name in self.input_order]

IndexError: only integers, slices (:), ellipsis (...), numpy.newaxis (None) and integer or boolean arrays are valid indices

My code is below:


from keras.models import Sequential, Graph
from keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D
from keras.layers.core import Flatten, Dense, Dropout
from keras.optimizers import SGD
import keras.backend as K
from keras.utils import np_utils, generic_utils
import numpy as np
from PIL import Image

def load_data(csv_file,X,y):
    path_and_labels = []
    f = open(csv_file, 'rb')
    for line in f:
        line = line.strip('\r\n')
        path, label = line.split(',')
        label = int(label)
        path_and_labels.append((path, label))
    f.close()

    j = 0
    for path_and_label in path_and_labels:
        path, label = path_and_label
        img = Image.open(path)
        arr_img = np.asarray(img, dtype='float32')
        arr_img = arr_img.transpose(2,0,1).reshape(3,128,128)
        X[j,:,:,:] = arr_img[:,:,:]
        y[j] = label
        j+=1

    return X,y

train_data = np.empty((4300,3,128,128),dtype='float32')
train_label = np.empty((4300,),dtype="uint8")
test_data = np.empty((860,3,128,128),dtype='float32')
test_label = np.empty((860,),dtype="uint8")
train_data,train_label = load_data(r'E:\Data\face_100\detect\train.csv',train_data,train_label)
test_data,test_label = load_data(r'E:\Data\face_100\detect\test.csv',test_data,test_label)

nb_classes = 43
train_label = np_utils.to_categorical(train_label,nb_classes)

img_width, img_height = 128, 128


first_layer = ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height))

model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))




layer_dict = dict([(layer.name, layer) for layer in model.layers])



import h5py

weights_path = 'vgg16_weights.h5'

f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        break
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')



graph_m = Graph()
graph_m.add_input('my_inp', input_shape=(3, img_width, img_height))
graph_m.add_node(model, name='your_model', input='my_inp')
graph_m.add_node(Flatten(), name='Flatten', input='your_model')
graph_m.add_node(Dense(4096, activation='relu'), name='Dense1',      input='Flatten')
graph_m.add_node(Dropout(0.5), name='Dropout1', input='Dense1')
graph_m.add_node(Dense(4096, activation='relu'), name='Dense2',  input='Dropout1')
graph_m.add_node(Dropout(0.5), name='Dropout2', input='Dense2')
graph_m.add_node(Dense(43, activation='softmax'), name='Final', input='Dropout2')
graph_m.add_output(name='out1', input='Final')
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
graph_m.compile(optimizer=sgd, loss={'out1': 'categorical_crossentropy'})
graph_m.fit(train_data,train_label,nb_epoch=10,shuffle=True,verbose=1,validation_split=0.2)

Can you give me some advice? Thank you very much!

@rex-yue-wu

@baraldilorenzo Thank you for sharing these converted model files. I tested this model on ImageNet data, but the predicted labels do not make any sense, i.e. when I look up a predicted label index in the ImageNet metadata file, the corresponding class description is definitely different from the image content.

Here are five cat images I used to test the above claim.

In imagenet, below are the classes I found related to 'cat'

([[44]], [u'n02445715'], [u'skunk, polecat, wood pussy'], [u'American musteline mammal typically ejecting an intensely malodorous fluid when startled; in some classifications put in a separate subfamily Mephitinae'], [[0]], [[]], [[1]], [[1300]])
([[95]], [u'n02123597'], [u'Siamese cat, Siamese'], [u'a slender short-haired blue-eyed breed of cat having a pale coat with dark ears paws face and tail tip'], [[0]], [[]], [[1]], [[1300]])
([[182]], [u'n02443114'], [u'polecat, fitch, foulmart, foumart, Mustela putorius'], [u'dark brown mustelid of woodlands of Eurasia that gives off an unpleasant odor when threatened'], [[0]], [[]], [[1]], [[1300]])
([[199]], [u'n02497673'], [u'Madagascar cat, ring-tailed lemur, Lemur catta'], [u'small lemur having its tail barred with black'], [[0]], [[]], [[0]], [[1300]])
([[1158]], [u'n02121808'], [u'domestic cat, house cat, Felis domesticus, Felis catus'], [u'any domesticated member of the genus Felis'], [[5]], [[8, 10, 55, 95, 174]], [[2]], [[0]])
([[1230]], [u'n02121620'], [u'cat, true cat'], [u'feline mammal usually having thick soft fur and no ability to roar: domestic cats; wildcats'], [[2]], [[1158, 1231]], [[3]], [[0]])
([[1232]], [u'n02127808'], [u'big cat, cat'], [u'any of several large cats typically able to roar and living in the wild'], [[6]], [[30, 76, 85, 153, 190, 206]], [[2]], [[0]])

Below are the top-5 predictions (node_index, probability) for these images:

0 (285, 0.5447191596031189), 1 (281, 0.29963484406471252), 2 (282, 0.12506027519702911), 3 (287, 0.0028702393174171448), 4 (753, 0.0022252483759075403)
0 (285, 0.34820479154586792), 1 (281, 0.31037920713424683), 2 (282, 0.28222346305847168), 3 (287, 0.0062280762940645218), 4 (904, 0.0044251754879951477)
0 (282, 0.52378344535827637), 1 (281, 0.24879153072834015), 2 (285, 0.11023161560297012), 3 (287, 0.036640208214521408), 4 (478, 0.014831849373877048)
0 (281, 0.26362878084182739), 1 (285, 0.25445196032524109), 2 (282, 0.10163164883852005), 3 (722, 0.097987674176692963), 4 (728, 0.032452180981636047)
0 (281, 0.65300887823104858), 1 (282, 0.17187526822090149), 2 (285, 0.15421499311923981), 3 (287, 0.010831587016582489), 4 (811, 0.00067715055774897337)

For me, it seems that the output node indices do not directly correspond to the ImageNet metadata labels, but if we can get the correct mapping then we know the right class of a given image (see how all these cat images have high responses on output nodes 285, 281, and 282). Do you have any idea where I can get the correct mapping from the Keras output node index to the ImageNet class label?

@baraldilorenzo (Author)

@rex-yue-wu To me, it seems that the network is doing a good job. Your mapping between class ids and synsets, instead, is wrong.

class 285 corresponds to synset n02123597 - "Siamese cat, Siamese"
class 282 corresponds to synset n02123045 - "tabby, tabby cat"
class 281 corresponds to synset n02120505 - "grey fox, gray fox, Urocyon cinereoargenteus"

The mapping between ILSVRC class ids and Imagenet synsets can be found here: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
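
For illustration, a short sketch of using that mapping (it assumes the synset_words.txt file from the archive above, with one "wordnet_id description" pair per line ordered by class id, and reuses the model and im variables prepared in vgg-19_keras.py):

import numpy as np

# Each line of synset_words.txt is "<wordnet id> <description>", ordered by ILSVRC class id.
with open('synset_words.txt') as f:
    classes = [line.strip().split(' ', 1) for line in f]

out = model.predict(im)                 # model and im as prepared in vgg-19_keras.py
top5 = np.argsort(out[0])[::-1][:5]     # indices of the five highest-scoring classes
for idx in top5:
    wnid, description = classes[idx]
    print('%d  %s  %s  (%.4f)' % (idx, wnid, description, out[0][idx]))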

@aalgorry commented Oct 3, 2016

Hi Lorenzo, thank you very much! Great work!

@lucasdavid

This only seems to work with the Theano backend. You'll need to transpose the weight matrices if you're using TensorFlow.
Also, just for reference, the VGG16 and VGG19 models pre-trained on ImageNet are now available through the applications module of Keras:

from keras.layers import Input
from keras.applications import VGG19

model = VGG19(weights='imagenet')
model.predict(...)
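
A slightly fuller sketch of that usage, assuming a placeholder image path; preprocess_input and decode_predictions come from keras.applications.vgg19 and handle the mean subtraction and the class-index-to-synset mapping discussed above:

import numpy as np
from keras.preprocessing import image
from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions

model = VGG19(weights='imagenet')

img = image.load_img('cat.jpg', target_size=(224, 224))   # placeholder image path
x = np.expand_dims(image.img_to_array(img), axis=0)
x = preprocess_input(x)                 # channel reordering and mean subtraction

preds = model.predict(x)
print(decode_predictions(preds, top=5)[0])   # [(synset, description, score), ...]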

@v7t-codes

Hey, I get this error!

/usr/local/lib/python2.7/dist-packages/keras/backend/theano_backend.py:1203:
UserWarning: DEPRECATION: the 'ds' parameter is not going to exist anymore as it is going to be replaced by the parameter 'ws'.
mode='max')
/usr/local/lib/python2.7/dist-packages/keras/backend/theano_backend.py:1203: UserWarning: DEPRECATION: the 'st' parameter is not going to exist anymore as it is going to be replaced by the parameter 'stride'.
mode='max')
/usr/local/lib/python2.7/dist-packages/keras/backend/theano_backend.py:1203: UserWarning: DEPRECATION: the 'padding' parameter is not going to exist anymore as it is going to be replaced by the parameter 'pad'.
mode='max')
Traceback (most recent call last):
File "VGG19.py", line 73, in
model = VGG_19('vgg19_weights.h5')
File "VGG19.py", line 53, in VGG_19
model.add(Dense(4096, activation='relu'))
File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 307, in add
output_tensor = layer(self.outputs[0])
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 484, in call
self.build(input_shapes[0])
File "/usr/local/lib/python2.7/dist-packages/keras/layers/core.py", line 604, in build
name='{}_W'.format(self.name))
File "/usr/local/lib/python2.7/dist-packages/keras/initializations.py", line 59, in glorot_uniform
return uniform(shape, s, name=name)
File "/usr/local/lib/python2.7/dist-packages/keras/initializations.py", line 32, in uniform
return K.random_uniform_variable(shape, -scale, scale, name=name)
File "/usr/local/lib/python2.7/dist-packages/keras/backend/theano_backend.py", line 111, in random_uniform_variable
dtype=dtype, name=name)
File "/usr/local/lib/python2.7/dist-packages/keras/backend/theano_backend.py", line 39, in variable
value = np.asarray(value, dtype=dtype)
File "/usr/local/lib/python2.7/dist-packages/numpy/core/numeric.py", line 482, in asarray
return array(a, dtype, copy=False, order=order)

MemoryError

Thanks in advance!

@DanlanChen

How can I use it for grayscale images?

@AdityaSoni19031997 commented Aug 30, 2017

For grayscale we need to change the number of channels to 1; for color it is 3.
We need to adjust the input shape of the very first layer (3 -> 1). A sketch is below.
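
A hedged sketch of both options (not from the gist): either give the first layer a single-channel input shape, in which case the first conv layer can no longer reuse the pre-trained RGB weights, or replicate the grayscale plane into three channels so the pre-trained network can be used unchanged.

import cv2, numpy as np

# Option 1: single-channel input; the first conv layer must then be (re)trained.
# model.add(ZeroPadding2D((1, 1), input_shape=(1, 224, 224)))

# Option 2: keep the 3-channel network and stack the grayscale image three times.
gray = cv2.resize(cv2.imread('cat.jpg', cv2.IMREAD_GRAYSCALE), (224, 224)).astype(np.float32)
im = np.repeat(gray[np.newaxis, :, :], 3, axis=0)   # shape (3, 224, 224)
im = np.expand_dims(im, axis=0)                     # add batch dimension
# mean subtraction as in vgg-19_keras.py still applies before model.predict(im)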

@manimtechrs

I have 4 classes to predict. Can I use the VGG-16 model to predict these classes?

@flyingduck92

My TensorFlow version is 1.8.0.
I cannot load the VGG-19 weight file.
This is the error I got:
You are trying to load a weight file containing 0 layers into a model with 19 layers.

@NEUUCAS commented Mar 17, 2019

> My TensorFlow version is 1.8.0.
> I cannot load the VGG-19 weight file.
> This is the error I got:
> You are trying to load a weight file containing 0 layers into a model with 19 layers.

I have the same error. How do I fix it?

@NimmiGhetia commented Apr 15, 2019

I am getting this error; can you help me resolve it? I haven't changed the code.

Using TensorFlow backend.
vgg-19_keras.py:11: UserWarning: Update your Conv2D call to the Keras 2 API: Conv2D(64, (3, 3), activation="relu")
model.add(Convolution2D(64, 3, 3, activation='relu'))
WARNING:tensorflow:From /opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
vgg-19_keras.py:13: UserWarning: Update your Conv2D call to the Keras 2 API: Conv2D(64, (3, 3), activation="relu")
model.add(Convolution2D(64, 3, 3, activation='relu'))
vgg-19_keras.py:17: UserWarning: Update your Conv2D call to the Keras 2 API: Conv2D(128, (3, 3), activation="relu")
model.add(Convolution2D(128, 3, 3, activation='relu'))
vgg-19_keras.py:19: UserWarning: Update your Conv2D call to the Keras 2 API: Conv2D(128, (3, 3), activation="relu")
model.add(Convolution2D(128, 3, 3, activation='relu'))
Traceback (most recent call last):
File "vgg-19_keras.py", line 73, in
model = VGG_19('vgg19_weights.h5')
File "vgg-19_keras.py", line 20, in VGG_19
model.add(MaxPooling2D((2,2), strides=(2,2)))
File "/opt/conda/envs/py2/lib/python2.7/site-packages/keras/engine/sequential.py", line 181, in add
output_tensor = layer(self.outputs[0])
File "/opt/conda/envs/py2/lib/python2.7/site-packages/keras/engine/base_layer.py", line 457, in call
output = self.call(inputs, **kwargs)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/keras/layers/pooling.py", line 205, in call
data_format=self.data_format)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/keras/layers/pooling.py", line 268, in _pooling_function
pool_mode='max')
File "/opt/conda/envs/py2/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 3978, in pool2d
data_format=tf_data_format)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/ops/nn_ops.py", line 2748, in max_pool
name=name)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 5137, in max_pool
data_format=data_format, name=name)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3300, in create_op
op_def=op_def)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1823, in init
control_input_ops)
File "/opt/conda/envs/py2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1662, in _create_c_op
raise ValueError(str(e))
ValueError: Negative dimension size caused by subtracting 2 from 1 for 'max_pooling2d_2/MaxPool' (op: 'MaxPool') with input shapes: [?,1,112,128].

@b-hakim commented Apr 20, 2019

I got the same issue:

ValueError: You are trying to load a weight file containing 0 layers into a model with 19 layers.

@b-hakim commented Apr 20, 2019

@NimmiGhetia your problem is in the Keras configuration:
edit ~/.keras/keras.json and change "image_data_format" to "channels_first". A runtime alternative is sketched below.
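
For reference, a small sketch of checking and overriding the data format at runtime instead of editing the config file (K.image_data_format / K.set_image_data_format are the Keras 2 backend calls; the keras.json keys shown in the comment are the standard ones):

from keras import backend as K

# ~/.keras/keras.json normally looks like:
# {"image_data_format": "channels_first", "epsilon": 1e-07,
#  "floatx": "float32", "backend": "tensorflow"}

print(K.image_data_format())                 # 'channels_last' or 'channels_first'
K.set_image_data_format('channels_first')    # match the (3, 224, 224) input shape above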

@Rouxkein

This is my model, which runs to completion. I used VGG19 to train on the CIFAR-10 data; you can refer to it:

from keras.datasets import cifar10
import cv2
import random
import numpy as np
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import tensorflow as tf

# load data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# sample 2000 training images
ind_train = random.sample(list(range(x_train.shape[0])), 2000)
x_train = x_train[ind_train]
y_train = y_train[ind_train]

# sample 2000 test images
ind_test = random.sample(list(range(x_test.shape[0])), 2000)
x_test = x_test[ind_test]
y_test = y_test[ind_test]

# upscale the 32x32 CIFAR-10 images to 224x224
def resize_data(data):
    data_upscale = np.zeros((data.shape[0], 224, 224, 3))
    for i, img in enumerate(data):
        l_img = cv2.resize(img, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
        data_upscale[i] = l_img
    return data_upscale

# resize train and test data
x_train_r = resize_data(x_train)
x_test_r = resize_data(x_test)

# one-hot encode the labels
train_y = to_categorical(y_train)
test_y = to_categorical(y_test)

def VGG_19(weights=None, include_top=True, classes=10, input_shape=(224, 224, 3)):
    model = Sequential()
    model.add(ZeroPadding2D((1,1), input_shape=(224,224,3)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    return model

model = VGG_19(input_shape=(224, 224, 3), classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train_r, train_y, epochs=5, batch_size=20, validation_data=(x_test_r, test_y))
model.summary()

@mounirlazid

I am looking for a simple example of a VGG16 model to classify 3D medical images. Thank you for your help.

@zhouhongyuthu

Hi, the h5 link is no longer valid. Could you fix it?

@ThatIsMyUsername

Hi, the h5 link is no longer valid. Could you fix it?
