@ishwor2048
Created July 18, 2019 21:22
This project gives start-to-end Python code to build a model that recognizes handwritten digits. Thanks to Siraj Raval for the amazing tutorial.
# Working with the MNIST dataset: handwritten digit prediction
# Python 2 and 3 compatibility
from __future__ import print_function
# Importing tensorflow as tf
import tensorflow as tf
# Importing keras via tensorflow so it matches the tensorflow.keras layers used below
from tensorflow import keras
# Importing MNIST dataset, which is handwritten sample data
from tensorflow.keras.datasets import mnist
# importing sequential
from tensorflow.keras.models import Sequential
# Dense means fully connected layers; Dropout randomly disables units during training to reduce overfitting
from tensorflow.keras.layers import Dense, Dropout, Flatten
# Conv2D for convolution over images; pooling downsamples feature maps, keeping the strongest activations
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K
# Mini batch gradient descent ftw
batch_size = 128
# 10 output classes, one per digit 0-9
num_classes = 10
# keep the training run short: 12 epochs
epochs = 12
# input image dimensions: 28x28 pixel images
img_rows, img_cols = 28, 28
# the data, already shuffled and split into train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
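# Optional sanity check (an illustrative addition): the raw arrays are 28x28 grayscale
# images with plain integer labels, before any reshaping.
print('raw shapes:', x_train.shape, y_train.shape, x_test.shape, y_test.shape)
# expected output: (60000, 28, 28) (60000,) (10000, 28, 28) (10000,)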
# Reshape for the backend's image data format: "channels_last" expects (rows, cols, channels),
# "channels_first" expects (channels, rows, cols)
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    print(x_train.shape)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Cast to float and normalize pixel values to the [0, 1] range
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
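# Optional sanity check (an illustrative addition): each label is now a one-hot row of
# length num_classes, e.g. the digit 5 becomes [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
print('one-hot label shapes:', y_train.shape, y_test.shape)  # expected: (60000, 10) (10000, 10)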
# Build our model
model = Sequential()
# Convolutional layer with rectified linear unit activation
model.add(Conv2D(32, kernel_size=(4, 4),
                 activation='relu',
                 input_shape=input_shape))
# a second convolutional layer, this time with 64 filters
model.add(Conv2D(64, (4, 4), activation='relu'))
# Choose the best features via pooling
model.add(MaxPooling2D(pool_size=(2,2)))
# randomly drop neurons during training to reduce overfitting and help convergence
model.add(Dropout(0.25))
# flatten the feature maps into a single vector for the fully connected layers
model.add(Flatten())
# fully connected layer to combine the extracted features
model.add(Dense(128, activation='relu'))
# one more dropout layer for regularization's sake
model.add(Dropout(0.5))
# softmax output layer to turn the scores into class probabilities
model.add(Dense(num_classes, activation='softmax'))
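# Print a layer-by-layer summary (an optional addition) to confirm the architecture
# and parameter counts before training
model.summary()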
# Adadelta: a popular form of gradient descent with an adaptive learning rate
# categorical cross-entropy since we have multiple classes (10)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adadelta(),
              metrics=['accuracy'])
# train that model
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
# Evaluating performance of the model
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
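# Optional spot check (an illustrative addition): classify one held-out digit and
# compare the prediction with its true label
pred = model.predict(x_test[:1])
print('predicted digit:', pred.argmax(), '| true digit:', y_test[0].argmax())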
# Save the model
# Serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# Serialize weights to HDF5
model.save_weights("model.h5")
###############################################################################
###############################################################################
# Time to work on flask to deploy the model
# Importing required libraries
from flask import Flask, render_template, request
# note: scipy.misc imread/imresize come from an older SciPy release (they were removed
# in later versions), matching the original tutorial; the unused imsave import is gone
from scipy.misc import imread, imresize
import numpy as np
import keras.models
import re
import sys
import os
import base64
sys.path.append(os.path.abspath('./model'))
# init() comes from the load.py snippet appended below (kept under ./model in the original layout)
from load import init
# init flask app
app = Flask(__name__)
global model, graph
model, graph = init()
def convertImage(imgData):
    # pull the base64-encoded image out of the data-URL payload sent by the front end
    imgstr = re.search(rb'base64,(.*)', imgData).group(1)
    with open('output.png', 'wb') as output:
        output.write(base64.b64decode(imgstr))
@app.route('/')
def index():
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def predict():
    # raw request body: a base64 data URL of the digit drawn in the browser
    imgData = request.get_data()
    convertImage(imgData)
    # read the saved image back in as grayscale (convertImage wrote output.png)
    x = imread('output.png', mode='L')
    # invert so the digit is white on a black background, as in MNIST
    x = np.invert(x)
    # resize to the 28x28 input the network expects
    x = imresize(x, (28, 28))
    # match the training preprocessing: add batch/channel dims and scale pixels to [0, 1]
    x = x.reshape(1, 28, 28, 1).astype('float32') / 255
    with graph.as_default():
        out = model.predict(x)
        response = np.array_str(np.argmax(out, axis=1))
        return response
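# A small illustrative helper (not part of the original tutorial) for manually exercising
# /predict from a Python shell while the server is running. It mimics the front end by
# POSTing a base64 data URL; 'digit.png', the localhost URL, and the 'requests' package
# are assumptions made purely for this sketch.
def test_predict_endpoint(image_path='digit.png', url='http://localhost:5000/predict'):
    import requests  # imported here so the Flask app itself does not depend on it
    with open(image_path, 'rb') as f:
        payload = b'data:image/png;base64,' + base64.b64encode(f.read())
    # the server replies with the predicted digit as plain text
    return requests.post(url, data=payload).text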
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
###############################################################################
###############################################################################
# load.py, included on the same page: restores the trained model for the Flask app above
import numpy as np
import keras.models
from keras.models import model_from_json
from scipy.misc import imread, imresize, imshow
import tensorflow as tf
def init():
    # read the saved network architecture
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into the new model
    loaded_model.load_weights("model.h5")
    print("Loaded Model from Disk")
    # Compile the loaded model; Keras requires an optimizer here even though we only
    # predict, so 'adam' serves as a placeholder
    loaded_model.compile(loss='categorical_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])
    # loss, accuracy = model.evaluate(X_test, y_test)
    # print('loss:', loss)
    # print('accuracy:', accuracy)
    # grab the default graph (TensorFlow 1.x pattern) so Flask request threads can run predictions
    graph = tf.get_default_graph()
    return loaded_model, graph
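# An illustrative standalone check, assuming model.json and model.h5 sit in the working
# directory: reload the network with init() and classify one MNIST test digit, using the
# same TF1-style graph handling as the Flask app.
if __name__ == '__main__':
    from tensorflow.keras.datasets import mnist
    (_, _), (x_check, y_check) = mnist.load_data()
    sample = x_check[:1].reshape(1, 28, 28, 1).astype('float32') / 255
    loaded_model, graph = init()
    with graph.as_default():
        print('predicted digit:', loaded_model.predict(sample).argmax(),
              '| true digit:', y_check[0])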