Skip to content

Instantly share code, notes, and snippets.

@miki998
Last active March 21, 2020 17:02
Show Gist options
  • Save miki998/f5125c94b299ba3191034d468cda938d to your computer and use it in GitHub Desktop.
Hyperas settings example for a CNN.
def data():
    """
    Data providing function.

    Kept separate from create_model() so that hyperopt does not reload
    the data for every evaluation run.

    Returns:
        X_train, y_train, X_test, y_test -- grayscale 48x48x1 image
        arrays and one-hot labels for the 4 event classes.
    """
    # Class-index <-> class-name encodings
    encode = {0: 'Attire',
              1: 'Decorationandsignage',
              2: 'Food',
              3: 'misc'}
    reverse = {v: k for k, v in encode.items()}

    # Load every training image and look up its label in train.csv
    PATH = 'train_images/'
    image_names = os.listdir(PATH)
    train = pd.read_csv('train.csv')
    images = [cv2.imread(PATH + image_name) for image_name in tqdm(image_names)]
    labels = [train.loc[train['Image'] == image_name]['Class'].iloc[0]
              for image_name in tqdm(image_names)]

    def standard_image(img):
        # Grayscale + resize to the network's 48x48x1 input shape.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return cv2.resize(img, (48, 48),
                          interpolation=cv2.INTER_AREA).reshape((48, 48, 1))

    # One-hot encode the labels (vectorized; replaces the manual loop)
    X = np.array(images)
    y_raw = np.array([reverse[label] for label in labels])
    y = np.zeros((len(y_raw), 4))
    y[np.arange(len(y_raw)), y_raw] = 1

    # Reproducible train/test split.
    # Bug fixes vs. the original:
    #   * the split is drawn with random.sample, so `random` must be
    #     seeded too -- seeding only np.random left the split unseeded;
    #   * use `ratio` instead of a second hard-coded 0.8.
    np.random.seed(40)
    random.seed(40)
    ratio = 0.8
    indexes = range(len(images))
    train_idx = random.sample(indexes, int(ratio * len(images)))
    test_idx = np.delete(indexes, train_idx)

    X_train_raw, y_train = X[train_idx], y[train_idx]
    X_test_raw, y_test = X[test_idx], y[test_idx]

    # Standardize every image for both splits
    X_train = np.array([standard_image(img) for img in X_train_raw])
    X_test = np.array([standard_image(img) for img in X_test_raw])
    return X_train, y_train, X_test, y_test
def create_model(X_train, y_train, X_test, y_test):
    """
    Hyperas model-providing function.

    Builds and trains a CNN whose dropout rates, optional 5th conv
    layer, dense widths, optimizer and batch size are hyperas
    search-space templates ({{...}}), evaluated by optim.minimize.

    Returns:
        dict with 'loss' (negated best validation accuracy, which
        hyperopt minimizes), 'status' and the trained 'model'.
    """
    # Initialising the CNN
    model = Sequential()

    # 1st convolution block
    model.add(Conv2D(64, (3, 3), padding='same', input_shape=(48, 48, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))

    # 2nd convolution block
    model.add(Conv2D(128, (5, 5), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))

    # 3rd convolution block, followed by the 1st max-pool
    model.add(Conv2D(512, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    # 4th convolution block
    model.add(Conv2D(512, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # Optional 5th convolution block (hyperas chooses 4 vs 5 layers)
    if {{choice(['four', 'five'])}} == 'five':
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # 2nd max-pool, then flatten for the dense head
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Flatten())

    # 1st fully connected layer
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))

    # 2nd fully connected layer
    model.add(Dense({{choice([512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))

    # Softmax over the 4 classes
    model.add(Dense(4, activation='softmax'))
    model.compile(optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    result = model.fit(X_train, y_train,
                       batch_size={{choice([64, 128])}},
                       epochs=10,
                       verbose=2,
                       validation_data=(X_test, y_test))

    # Bug fix: Keras renamed 'val_acc' to 'val_accuracy' in 2.3 /
    # tf.keras, so hard-coding 'val_acc' raises KeyError on newer
    # versions. Read whichever key the installed Keras produced.
    history = result.history
    val_key = 'val_acc' if 'val_acc' in history else 'val_accuracy'
    validation_acc = np.amax(history[val_key])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    # Run the hyperas search: 5 trials over the templates in
    # create_model, using the data() provider above.
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials(),
                                          notebook_name='gala_recog')

    # Bug fix: the original referenced X_test / y_test here without
    # ever defining them in this scope (NameError). Re-fetch the same
    # deterministic split that data() produces.
    X_train, y_train, X_test, y_test = data()

    # Evaluate the best model on the held-out test data
    print('\n# Evaluate on test data')
    results = best_model.evaluate(X_test, y_test, batch_size=128)
    print('test loss, test acc:', results)

    # Generate predictions (probabilities -- the output of the last
    # softmax layer) on a few samples using `predict`
    print('\n# Generate predictions for 3 samples')
    predictions = best_model.predict(X_test[:3])
    print('predictions shape:', predictions.shape)
    print(best_run)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment