@logankilpatrick
Created November 7, 2021 16:21
Train a basic CNN model with Convolutional, MaxPooling, Flatten, and Dense layers in TensorFlow
import os
import sys
import numpy as np
from tensorflow.keras import models, layers, callbacks
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import mnist
# Change this to the location of the database directories
DB_DIR = os.path.dirname(os.path.realpath(__file__))
# Import databases
sys.path.insert(1, DB_DIR)
from db_utils import get_imdb_dataset, get_speech_dataset
def Secure_Voice_Channel(func):
    """Define Secure_Voice_Channel decorator."""
    def execute_func(*args, **kwargs):
        print('Established Secure Connection.')
        returned_value = func(*args, **kwargs)
        print("Ended Secure Connection.")
        return returned_value
    return execute_func
@Secure_Voice_Channel
def generic_vns_function(classes, lr):
    """Generic Deep Learning Model generator."""
    model = models.Sequential()
    # Convolutional layer: 64 filters with a 4x4 kernel.
    # kernel_size is the spatial size of the convolution window, so 4x4 here.
    model.add(layers.Conv2D(64, kernel_size=4, activation='relu'))
    # Max-pooling layer (default 2x2 pool) to downsample the feature maps.
    model.add(layers.MaxPooling2D())
    # Flatten the feature maps into a vector for the classifier head.
    model.add(layers.Flatten())
    model.add(layers.Dense(classes, kernel_initializer='normal',
                           activation='softmax'))
    # Adam's `lr` argument is deprecated; `learning_rate` is the current name.
    opt = Adam(learning_rate=lr)
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    return model
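# Shape walk-through for this model, assuming the 28x28x1 MNIST inputs used by
# the "computer_vision" path below: Conv2D(64, kernel_size=4) produces 25x25x64
# feature maps, MaxPooling2D halves them to 12x12x64, and Flatten yields a
# 9216-unit vector feeding the softmax Dense layer.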
def train_model(model, epochs, batch_size, X_train, y_train, X_test, y_test):
    """Generic Deep Learning Model training function."""
    # Stop training early if validation loss fails to improve for 3 epochs.
    cb = [callbacks.EarlyStopping(monitor='val_loss', patience=3)]
    model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs,
              batch_size=batch_size, verbose=1, callbacks=cb)
    scores = model.evaluate(X_test, y_test, verbose=2)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100))
    return model
def choose_dataset(dataset_type):
    """Select dataset based on string variable."""
    if dataset_type == "nlp":
        return get_imdb_dataset(dir=DB_DIR)
    elif dataset_type == "computer_vision":
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
    elif dataset_type == "speech_recognition":
        (X_train, y_train), (X_test, y_test), (_, _) = get_speech_dataset()
    else:
        raise ValueError("Couldn't find dataset.")
    (X_train, X_test) = normalize_dataset(dataset_type, X_train, X_test)
    (X_train, y_train), (X_test, y_test) = reshape_dataset(X_train, y_train, X_test, y_test)
    return (X_train, y_train), (X_test, y_test)
def normalize_dataset(string, X_train, X_test):
    """Normalize speech recognition and computer vision datasets."""
    if string == "computer_vision":
        # Scale pixel values from [0, 255] down to [0, 1].
        X_train = X_train / 255
        X_test = X_test / 255
    else:
        # Standardize to zero mean and unit variance (z-score).
        mean = np.mean(X_train)
        std = np.std(X_train)
        X_train = (X_train - mean) / std
        X_test = (X_test - mean) / std
    return (X_train, X_test)
def reshape_dataset(X_train, y_train, X_test, y_test):
    """Reshape Computer Vision and Speech datasets."""
    # Reshape to the Conv2D input format (num_samples, x_axis, y_axis, channels).
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1).astype('float32')
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1).astype('float32')
    # One-hot encode the labels.
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    return (X_train, y_train), (X_test, y_test)
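# For MNIST, this turns X_train from (60000, 28, 28) into (60000, 28, 28, 1)
# and y_train from (60000,) into one-hot rows of shape (60000, 10).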
def main():
    # Hyperparameters
    layer_units = 1000  # Unused: the dense-only stack was replaced by Conv layers.
    epochs = 80
    batch_size = 200
    lr = 0.0001
    # Dataset: "computer_vision" or "speech_recognition"
    dataset = "computer_vision"
    # Import datasets
    (X_train, y_train), (X_test, y_test) = choose_dataset(dataset)
    # Generate and train model
    # (The layers argument was removed since Conv layers are used now.)
    model = generic_vns_function(y_train.shape[1], lr)
    trained_model = train_model(model, epochs, batch_size, X_train, y_train, X_test, y_test)
    # Save model to h5 file
    os.makedirs('models', exist_ok=True)  # Ensure the output directory exists.
    trained_model.save('models/model_%s_a3.h5' % dataset)
    return None

if __name__ == '__main__':
    main()
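A minimal inference sketch for the saved model, assuming the "computer_vision" run above completed and wrote models/model_computer_vision_a3.h5 (the path and sample index are illustrative); run it as a separate script:

import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.datasets import mnist

# Load the trained model saved by main().
model = load_model('models/model_computer_vision_a3.h5')
# Prepare one test digit the same way the training pipeline does.
(_, _), (X_test, y_test) = mnist.load_data()
sample = (X_test[:1] / 255).reshape(1, 28, 28, 1).astype('float32')
# Predict and report the most likely class next to the true label.
print("Predicted:", np.argmax(model.predict(sample), axis=1)[0], "Actual:", y_test[0])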