Simple Convolutional Net
from tensorflow import keras
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np

# Hyperparameters
NUM_CLASSES = 10
ACTIVATION = "relu"
LEARNING_RATE = 1e-3
BATCH_SIZE = 64
EPOCHS = 64
LOSS_FUNCTION = "categorical_crossentropy"
LAMBDA = 1e-4
REGULARIZER = keras.regularizers.l2(LAMBDA)
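# The L2 regularizer adds LAMBDA * sum(weights**2) to the training loss for every
# kernel it is attached to, penalizing large convolutional weights.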

# Loading CIFAR-10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalizing training and test samples with the training-set mean and std
# (applying the same statistics to both sets keeps them on a consistent scale)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
mean_train = np.mean(x_train)
std_train = np.std(x_train)
x_train = (x_train - mean_train) / std_train
x_test = (x_test - mean_train) / std_train

# One-hot encoding training and testing labels
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
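# e.g. a label of 3 becomes the 10-dimensional vector [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]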

# Model architecture
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=REGULARIZER, input_shape=x_train.shape[1:]))
model.add(Activation(ACTIVATION))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=REGULARIZER))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation(ACTIVATION))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=REGULARIZER))
model.add(Activation(ACTIVATION))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=REGULARIZER))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation(ACTIVATION))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=REGULARIZER))
model.add(Activation(ACTIVATION))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=REGULARIZER))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation(ACTIVATION))
model.add(BatchNormalization())
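# After three 2x2 max-pooling layers the 32x32 input is reduced to 4x4 spatially,
# so Flatten produces 4 * 4 * 128 = 2048 features for the dense classifier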
model.add(Flatten())
model.add(Dense(32, activation=ACTIVATION))
model.add(Dense(NUM_CLASSES, activation='softmax'))
model.summary()  # Print out a summary

# Data augmentation
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
datagen.fit(x_train)
augmented_data = datagen.flow(x_train, y_train, batch_size=BATCH_SIZE)
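# (flow yields randomly augmented batches of images and labels on the fly,
# so the raw x_train array is never passed to fit directly)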

# Training
adam = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(loss=LOSS_FUNCTION, optimizer=adam, metrics=['accuracy'])
model.fit(augmented_data, epochs=EPOCHS, verbose=1, validation_data=(x_test, y_test))

# Saving the model to disk
model_json = model.to_json()
with open('model.json', 'w') as json_file:
    json_file.write(model_json)
model.save_weights('model.h5')
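
# A minimal sketch of reloading the saved model for inference (assumes the
# model.json / model.h5 files written above; model_from_json rebuilds the
# architecture and load_weights restores the trained parameters)
with open('model.json') as json_file:
    restored_model = keras.models.model_from_json(json_file.read())
restored_model.load_weights('model.h5')
predictions = restored_model.predict(x_test[:8])  # softmax probabilities, shape (8, NUM_CLASSES)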