@dloranc
Created March 28, 2017 20:54
MNIST multi-layer perceptron example with history and script interruption (modified example from the Keras repository)
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import os
import keras
import cPickle
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.callbacks import Callback
from keras.models import load_model
class MyHistory(Callback):
    """Keras callback that records per-epoch metrics so they survive an interrupted run."""

    def __init__(self):
        super(MyHistory, self).__init__()
        self.history = {'acc': [], 'loss': [], 'val_acc': [], 'val_loss': []}

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        for key in self.history.keys():
            self.history[key].append(logs.get(key))
def load_history(filename):
    # Pickles must be read in binary mode to match the 'wb' mode used when saving.
    with open(filename, 'rb') as file:
        history = cPickle.load(file)
    return history


def save_history(history):
    with open('history.pkl', 'wb') as file:
        cPickle.dump(history, file)


def merge_history(previous, current):
    # Append the metrics of the current run to those of the previous runs.
    history = {key: previous[key] + current[key] for key in current.keys()}
    return history
batch_size = 128
num_classes = 10
epochs = 50
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if os.path.isfile('my_model.h5'):
    print('Loading model...')
    model = load_model('my_model.h5')
else:
    model = Sequential()
    model.add(Dense(512, activation='relu', input_shape=(784,)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])

model.summary()
previous_history = None

if os.path.isfile('history.pkl'):
    previous_history = load_history('history.pkl')

previous_epochs = 0

if previous_history is not None:
    previous_epochs = len(previous_history['acc'])

# Only the remaining epochs need to be trained on this run.
epochs = epochs - previous_epochs
my_history = MyHistory()

history = None

try:
    if epochs > 0:
        history = model.fit(x_train, y_train,
                            batch_size=batch_size, epochs=epochs,
                            verbose=1, validation_data=(x_test, y_test),
                            callbacks=[my_history])
    else:
        print('Training completed.')
except KeyboardInterrupt:
    # Ctrl+C stops training; keep whatever the callback has recorded so far.
    print()
    print('You pressed CTRL+C')
    history = my_history.history
finally:
    model.save('my_model.h5')

    # model.fit returns a History object, the callback a plain dict; normalise to a dict.
    if history is not None and not isinstance(history, dict):
        history = history.history

    if previous_history is not None and history is not None:
        history = merge_history(previous_history, history)

    if history is not None and len(history['acc']) > 0:
        save_history(history)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
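After a full run, or a sequence of interrupted and resumed runs, the accumulated metrics live in history.pkl, written by save_history above. The short follow-up script below is a minimal sketch of how that file could be inspected; the plotting part assumes matplotlib is installed and is not part of the training script itself.

# plot_history.py -- sketch: inspect the history saved by the script above.
# Assumes history.pkl exists in the working directory and matplotlib is available.
import cPickle
import matplotlib.pyplot as plt

with open('history.pkl', 'rb') as f:
    history = cPickle.load(f)

epochs_run = range(1, len(history['acc']) + 1)

# One point per completed epoch, across all resumed runs.
plt.plot(epochs_run, history['acc'], label='train accuracy')
plt.plot(epochs_run, history['val_acc'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()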