from keras.models import Sequential
from keras.layers import Dense, MaxPooling2D, Conv2D, Flatten

def build_keras_model():
    # this is the same configuration that Francois Chollet used in his cats vs dogs problem
    model = Sequential()
    # CNN layers
    model.add(Conv2D(filters=64, activation='relu', kernel_size=ksize, strides=strides, padding='same',
                     input_shape=(image_height, image_width, num_channels)))
    model.add(MaxPooling2D(pool_size=psize))
from keras.datasets.cifar10 import load_data

# load the data from the Keras datasets library
(train_data, train_labels), (test_data, test_labels) = load_data()
print(train_data.shape, train_labels.shape, test_data.shape, test_labels.shape)

# a dict that helps us decode the numeric output label to text
cifar10_labels = {
    0: 'Airplane',
    1: 'Automobile',
    2: 'Bird',
    3: 'Cat',
    4: 'Deer',
    5: 'Dog',
    6: 'Frog',
    7: 'Horse',
    8: 'Ship',
    9: 'Truck',
}
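# the later snippets use one-hot encoded labels (train_labels_cat / test_labels_cat)
# which are never defined in the fragments above; a minimal sketch of that step,
# assuming keras.utils.to_categorical is applied to the integer class labels
from keras.utils import to_categorical

train_labels_cat = to_categorical(train_labels, num_classes=10)
test_labels_cat = to_categorical(test_labels, num_classes=10)
print(train_labels_cat.shape, test_labels_cat.shape)  # (50000, 10) (10000, 10)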
import numpy as np
import matplotlib.pyplot as plt

np.random.seed(123)  # so your results match mine
%matplotlib inline

# pick 20 random training images (and their labels) to display in a 2 x 10 grid
rand_20 = np.random.randint(0, train_data.shape[0], 20)
sample_data = train_data[rand_20]
sample_labels = train_labels[rand_20].ravel()
row_count, col_count = 2, 10
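# the plotting loop itself is not shown above; a minimal sketch of how the sampled
# images could be displayed, using cifar10_labels for the titles (figure size and
# styling below are assumptions, not the author's exact settings)
fig, axes = plt.subplots(row_count, col_count, figsize=(16, 4))
for i, ax in enumerate(axes.ravel()):
    ax.imshow(sample_data[i])  # still uint8 at this point, so imshow works directly
    ax.set_title(cifar10_labels[sample_labels[i]], fontsize=8)
    ax.axis('off')
plt.show()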
# convert the image data into float32 types
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')

# mean-normalize the image data (not the labels!)
# we take the mean & stdev of the training dataset across all 4 axes (a single scalar each)
mean = np.mean(train_data, axis=(0, 1, 2, 3))
std = np.std(train_data, axis=(0, 1, 2, 3))
train_data = (train_data - mean) / std
# we apply the training set's mean & stdev to the test dataset as well
test_data = (test_data - mean) / std
# shuffle the training dataset & set aside val_perc % of rows as validation data
for _ in range(5):
    indexes = np.random.permutation(len(train_data))
    # randomly sorted!
    train_data = train_data[indexes]
    train_labels_cat = train_labels_cat[indexes]

# now we will set aside val_perc % of the train_data/labels as cross-validation sets
val_perc = 0.10
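# the actual train/validation split is not shown in the snippet above; a minimal
# sketch, assuming the last val_perc fraction of the shuffled rows becomes the
# validation set and that train_data2/train_labels_cat2/val_data/val_labels_cat
# (used later in fit()) refer to these splits
val_count = int(val_perc * len(train_data))
val_data = train_data[-val_count:]
val_labels_cat = train_labels_cat[-val_count:]
train_data2 = train_data[:-val_count]
train_labels_cat2 = train_labels_cat[:-val_count]
print(train_data2.shape, val_data.shape)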
# some more globals...
num_features = train_data.shape[1]  # = 32 for CIFAR-10 (the image height/width)
num_epochs = 15
batch_size = 100
ksize, kernel_size = 3, 3
psize, pool_size = 2, 2
strides = 1
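# build_keras_model() references image_height, image_width and num_channels, which are
# not defined anywhere in the fragments; a minimal sketch, assuming they are simply read
# off the training data's NHWC shape (32 x 32 x 3 for CIFAR-10)
_, image_height, image_width, num_channels = train_data.shape
num_classes = len(cifar10_labels)  # 10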
from keras.models import Sequential
from keras.layers import Dense, MaxPooling2D, Conv2D, Flatten

def build_keras_model():
    model = Sequential()
    # CNN layers
    model.add(Conv2D(filters=32, kernel_size=3, strides=1, padding='same', activation='elu',
                     input_shape=(image_height, image_width, num_channels)))
    model.add(Conv2D(filters=32, kernel_size=3, strides=1, padding='same', activation='elu'))
    model.add(MaxPooling2D(pool_size=2))
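    # --- continuation sketch ---
    # the model definition above is cut off here, and kr_base_model is never built in
    # these fragments; the classifier head, compile() settings and instantiation below
    # are assumptions, not necessarily the author's exact configuration
    model.add(Flatten())
    model.add(Dense(512, activation='elu'))
    model.add(Dense(num_classes, activation='softmax'))
    # categorical_crossentropy matches the one-hot encoded labels passed to fit()
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

kr_base_model = build_keras_model()
kr_base_model.summary()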
kr_history = kr_base_model.fit(train_data2, train_labels_cat2,
                               epochs=num_epochs,
                               batch_size=batch_size,
                               validation_data=(val_data, val_labels_cat))
test_loss, test_accuracy = kr_base_model.evaluate(test_data, test_labels_cat, batch_size=batch_size)
print('Test loss: %8.4f accuracy: %.3f' % (test_loss, test_accuracy))
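# a quick way to inspect the run is to plot the accuracy curves recorded in kr_history;
# a minimal sketch (older Keras releases store these under 'acc'/'val_acc' rather than
# 'accuracy'/'val_accuracy', so look the key up defensively)
hist = kr_history.history
acc_key = 'accuracy' if 'accuracy' in hist else 'acc'
plt.plot(hist[acc_key], label='train accuracy')
plt.plot(hist['val_' + acc_key], label='val accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()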