Last active
April 25, 2018 21:35
-
-
Save limitpointinf0/99fd4f605902e8416af31619aeba6d27 to your computer and use it in GitHub Desktop.
Easy Keras Models and Plotting
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np | |
import tensorflow | |
import keras | |
from keras.models import Sequential | |
from keras.layers import Dense, Dropout | |
from keras.utils import to_categorical | |
from keras.callbacks import EarlyStopping | |
from keras.optimizers import SGD, Adam | |
from keras.callbacks import History | |
from sklearn.model_selection import train_test_split | |
import matplotlib.pyplot as plt | |
#Define a function which creates a keras model with hidden layers, activation functions, and dropout rates
def simple_NN(input_shape, nodes_per=(60,), hidden=0, out=2, act_out='softmax', act_hid='relu', drop=True, d_rate=0.1):
    """Build a Keras Sequential classifier with a configurable hidden stack.

    Args:
        input_shape: shape tuple for the first Dense layer (excluding batch dim).
        nodes_per: widths of the hidden layers; nodes_per[0] is the first
            (mandatory) hidden layer, nodes_per[1:hidden+1] are the extras.
            Default changed from a mutable list to a tuple (same value) to
            avoid the shared-mutable-default pitfall.
        hidden: number of ADDITIONAL hidden layers beyond the first.
        out: number of output units.
        act_out: activation for the output layer.
        act_hid: activation for every hidden layer.
        drop: whether to insert a Dropout layer after each hidden layer.
        d_rate: dropout rate used when drop is True.

    Returns:
        An uncompiled keras Sequential model.

    Raises:
        ValueError: if nodes_per does not provide a width for every requested
            hidden layer. (The original zip() silently truncated the stack
            and a bare except hid any error by returning None.)
    """
    if hidden > len(nodes_per) - 1:
        raise ValueError(
            'nodes_per must supply %d widths for hidden=%d, got %d'
            % (hidden + 1, hidden, len(nodes_per))
        )
    model = Sequential()
    #first hidden layer needs the input shape; width is nodes_per[0]
    model.add(Dense(nodes_per[0], activation=act_hid, input_shape=input_shape))
    if drop:
        model.add(Dropout(d_rate))
    #remaining hidden layers take their widths from nodes_per[1:hidden+1]
    for width in nodes_per[1:hidden + 1]:
        model.add(Dense(width, activation=act_hid))
        if drop:
            model.add(Dropout(d_rate))
    model.add(Dense(out, activation=act_out))
    return model
#Define a function to plot historical data on key statistics of a keras model
def _plt_metric(history, key, title, val, size, save, fname):
    """Plot one training curve (history.history[key]) and optionally its
    val_<key> counterpart; save to fname when requested."""
    plt.figure(figsize=size)
    plt.title(title)
    plt.plot(history.history[key], 'b', label=key)
    if val:
        plt.plot(history.history['val_' + key], 'r', label='val_' + key)
    plt.xlabel('Epochs')
    plt.ylabel('Value')
    plt.legend()
    if save:
        #savefig must run BEFORE show(): show() flushes and clears the
        #current figure, so saving afterwards writes a blank image.
        plt.savefig(fname)
    plt.show()

def plt_perf(name, p_loss=False, p_acc=False, val=False, size=(15,15), save=False):
    """Plot model statistics for keras models.

    Args:
        name: a keras History object (as returned by model.fit).
        p_loss: plot the loss curve.
        p_acc: plot the accuracy curve ('acc' key — keras 1.x/2.x naming;
            newer keras uses 'accuracy'. TODO confirm against the keras
            version in use).
        val: also plot the corresponding val_* curve on the same axes.
        size: figsize passed to plt.figure.
        save: write each figure to loss.png / acc.png in the CWD.
    """
    if not (p_loss or p_acc):
        print('No plotting since all parameters set to false.')
        return
    if p_loss:
        _plt_metric(name, 'loss', 'Loss', val, size, save, 'loss.png')
    if p_acc:
        _plt_metric(name, 'acc', 'Accuracy', val, size, save, 'acc.png')
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
See https://www.kaggle.com/limitpointinf0/leaf-classifier-knn-nn for an example implementation of these helpers.