@ShiangYong
Created January 13, 2017 07:12
Deep convolutional neural network to solve the MNIST classification problem. Code adapted from Anuj Shah's tutorial: https://www.youtube.com/watch?v=yDVap0lpYKg.
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
# input image dimensions
img_rows, img_cols = 28, 28
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to (samples, rows, cols, channels); assumes TensorFlow-style ('tf') dim ordering
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
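# Optional tweak, not part of the original gist: rescaling the raw pixel
# intensities from [0, 255] to [0, 1] typically speeds up convergence.
# Uncomment the two lines below to enable it.
# X_train /= 255.0
# X_test /= 255.0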
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'training samples')
print(X_test.shape[0], 'test samples')
# convert class labels to one-hot vectors
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# display a sample training image and its one-hot label
i = 4800
plt.imshow(X_train[i, :, :, 0], interpolation='nearest', cmap='gray')
plt.show()
print('Label:', Y_train[i])
# number of convolutional filters to use
nb_filters = 32
# size of the max-pooling windows
nb_pool = 2
# convolution kernel size
nb_conv = 3
# define model
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='same', input_shape=(img_rows, img_cols, 1)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Convolution2D(2*nb_filters, nb_conv, nb_conv, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(2*nb_filters, nb_conv, nb_conv, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Convolution2D(4*nb_filters, nb_conv, nb_conv, border_mode='valid'))
model.add(Activation('relu'))
model.add(Convolution2D(4*nb_filters, nb_conv, nb_conv, border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(6*128))
model.add(BatchNormalization())
model.add(Activation('sigmoid'))
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta')
# train CNN
model.fit(X_train, Y_train, batch_size=128, nb_epoch=12, verbose=2, validation_split=0.2)
# evaluate performance of CNN
loss = model.evaluate(X_test, Y_test, verbose=0)
# make predictions using model
Y_predict = np.argmax(model.predict(X_test), axis=1)
Y_predict = Y_predict.astype('uint8')
f1score = metrics.f1_score(y_test, Y_predict, average='micro')
print('Test Loss:', loss)
print('F1 score:', f1score)
# confusion matrix on the test set
conf_mat = metrics.confusion_matrix(y_test, Y_predict)
print('Confusion matrix:')
print(conf_mat)
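
Since matplotlib is already imported, the confusion matrix can also be rendered as a heatmap for a quick visual check of which digits get confused. A minimal sketch, not part of the original gist, building on the conf_mat array computed above:

# visualize the confusion matrix as a heatmap (rows: true digits, columns: predicted digits)
plt.imshow(conf_mat, interpolation='nearest', cmap='Blues')
plt.colorbar()
plt.xlabel('Predicted digit')
plt.ylabel('True digit')
plt.show()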