@agnesmm
Created September 27, 2017 16:58
from keras import backend as K
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Activation, Flatten, Lambda, Conv2D
from keras.layers import GlobalAveragePooling2D, Input, Dropout, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam, RMSprop, SGD
import numpy as np
try:  # Python 3
    from urllib.request import urlretrieve
except ImportError:  # Python 2
    from urllib import urlretrieve
path = '/home/ubuntu/nbs/data/dogscats_redux/'
model_path = path + 'vgg16_weights_tf_dim_ordering_tf_kernels.h5'
target_size=(224, 224)
batch_size=32
def preprocess_image(im):
    """Subtract the ImageNet channel means and reorder RGB -> BGR (VGG convention)."""
    vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
    im = im - vgg_mean
    return im[..., ::-1]  # RGB to BGR: flip the channel axis, which is last in TF dim ordering
def create_vgg16():
    """Build the VGG16 architecture (Keras 2 API, TensorFlow dim ordering)."""
    model = Sequential()
    # Preprocessing: mean subtraction and RGB -> BGR
    model.add(Lambda(preprocess_image, input_shape=(224, 224, 3), output_shape=(224, 224, 3)))
    # Conv Block 1
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Conv Block 2
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Conv Block 3
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Conv Block 4
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Conv Block 5
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Fully connected layers (ImageNet classifier head)
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(1000, activation='softmax'))
    return model
def get_batches(directory, target_size=target_size, batch_size=batch_size, shuffle=False):
    """Return an image generator that reads one sub-folder per class from `directory`."""
    datagen = ImageDataGenerator()
    return datagen.flow_from_directory(directory=directory,
                                       target_size=target_size,
                                       batch_size=batch_size,
                                       class_mode='categorical',
                                       shuffle=shuffle)
def get_weights(model_path, download=False):
    """Download the ImageNet VGG16 weights to `model_path` (only when download=True)."""
    if download:
        urlretrieve("https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5",
                    model_path)
batches = get_batches(path + 'train', shuffle=True)
valid_batches = get_batches(path + 'valid', batch_size=batch_size * 2, shuffle=False)

# Build VGG16 and load the pretrained ImageNet weights
initial_model = create_vgg16()
initial_model.load_weights(model_path)

# Replace the 1000-way ImageNet head with a softmax over our classes
x = Dense(batches.num_classes, activation='softmax')(initial_model.layers[-2].output)
model = Model(initial_model.input, x)

# First pass: freeze the pretrained layers and train only the new classifier head
for layer in initial_model.layers:
    layer.trainable = False
opt = Adam(lr=0.001)
model.compile(optimizer=opt,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch=batches.samples // batch_size, epochs=1,
                    validation_data=valid_batches,
                    validation_steps=valid_batches.samples // (batch_size * 2))

# Second pass: fine-tune everything past the first 10 layers with a smaller learning rate
for layer in model.layers[:10]:
    layer.trainable = False
for layer in model.layers[10:]:
    layer.trainable = True
opt = SGD(lr=1e-4)
model.compile(optimizer=opt,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch=batches.samples // batch_size, epochs=2,
                    validation_data=valid_batches,
                    validation_steps=valid_batches.samples // (batch_size * 2))
@jaggi commented May 10, 2018

Hi,
If I run the code, I get the following error:

2018-05-10 14:35:48.197082: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX
Traceback (most recent call last):
  File "ex3.py", line 89, in <module>
    x = Dense(batches.num_class, activation='softmax')(initial_model.layers[-2].output)
AttributeError: 'DirectoryIterator' object has no attribute 'num_class'

Can you please help me solve this error?

Thank you.
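
For reference, this AttributeError looks like a Keras API rename rather than a problem with the approach: the iterator returned by flow_from_directory exposes num_classes in Keras 2.x (Keras 1.x called it nb_class), so num_class does not exist. A minimal sketch of the check, assuming Keras 2.x and a hypothetical data/train directory with one sub-folder per class:

from keras.preprocessing.image import ImageDataGenerator

# Hypothetical path: any directory laid out with one sub-folder per class will do
batches = ImageDataGenerator().flow_from_directory('data/train',
                                                   target_size=(224, 224),
                                                   batch_size=32,
                                                   class_mode='categorical')
print(batches.num_classes)  # Keras 2.x attribute; Keras 1.x called it nb_class
# The corresponding one-line change in the script above would be:
# x = Dense(batches.num_classes, activation='softmax')(initial_model.layers[-2].output)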

@limasigor2

@jaggi did you fix your bug?
