MLP on Iris using Keras with different sizes of the hidden layer
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.regularizers import l2
from keras.utils import np_utils
import matplotlib.pyplot as plt
import keras
# Prepare data
iris = sns.load_dataset("iris")
X = iris.values[:, 0:4]
y = iris.values[:, 4]
# Make test and train set
train_X, test_X, train_y, test_y = train_test_split(X, y,
                                                    train_size=0.7,
                                                    random_state=0)
################################
# Evaluate Keras Neural Network
################################
# Make ONE-HOT
def one_hot_encode_object_array(arr):
    '''One hot encode a numpy array of objects (e.g. strings)'''
    uniques, ids = np.unique(arr, return_inverse=True)
    return np_utils.to_categorical(ids, len(uniques))
train_y_ohe = one_hot_encode_object_array(train_y)
test_y_ohe = one_hot_encode_object_array(test_y)
l2_reg = 0.0
# dropouts = [0.2, 0.4, 0.6, 0.8, 1.0]  # see the dropout-rate sweep sketched after the plotting code
dropout = 1.0
hiddens = [1, 16, 128, 1024, 4096]
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
accs, val_accs = [], []
# Build, train and evaluate one model per hidden-layer size
for hidden in hiddens:
    model = Sequential()
    model.add(Dense(hidden,
                    input_shape=(4,),
                    activation="relu",
                    kernel_regularizer=l2(l2_reg)))
    model.add(Dropout(dropout))
    model.add(Dense(3, activation="softmax"))
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=keras.optimizers.Adam(lr=0.01))
    model.summary()
    # Actual modelling
    history = model.fit(train_X, train_y_ohe,
                        batch_size=16,
                        epochs=200,
                        verbose=1,
                        validation_data=(test_X, test_y_ohe))
    score = model.evaluate(test_X, test_y_ohe, verbose=0)
    print('Test loss with hidden size %d: %.4f' % (hidden, score[0]))
    print('Test accuracy with hidden size %d: %.4f' % (hidden, score[1]))
    accs.append(history.history['acc'])
    val_accs.append(history.history['val_acc'])
# summarize history for accuracy
for i in range(len(accs)):
    plt.plot(accs[i], '--', label='Train hidden size ' + str(hiddens[i]), color=colors[i])
    plt.plot(val_accs[i], '-', label='Val hidden size ' + str(hiddens[i]), color=colors[i])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()
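
The commented-out dropouts list above suggests a companion experiment: fix the hidden size and sweep the dropout rate instead. A minimal sketch of that variant follows; the fixed hidden size of 16 and the rate values are illustrative assumptions rather than settings from the original gist (rate 1.0 is omitted because it would drop every unit).

# Optional variant: sweep the dropout rate at a fixed hidden size.
# NOTE: hidden size and rates below are illustrative assumptions.
hidden = 16
dropout_rates = [0.2, 0.4, 0.6, 0.8]
accs, val_accs = [], []
for rate in dropout_rates:
    model = Sequential()
    model.add(Dense(hidden, input_shape=(4,), activation="relu",
                    kernel_regularizer=l2(l2_reg)))
    model.add(Dropout(rate))
    model.add(Dense(3, activation="softmax"))
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=keras.optimizers.Adam(lr=0.01))
    history = model.fit(train_X, train_y_ohe,
                        batch_size=16, epochs=200, verbose=0,
                        validation_data=(test_X, test_y_ohe))
    accs.append(history.history['acc'])
    val_accs.append(history.history['val_acc'])

for i in range(len(dropout_rates)):
    plt.plot(accs[i], '--', label='Train dropout ' + str(dropout_rates[i]), color=colors[i])
    plt.plot(val_accs[i], '-', label='Val dropout ' + str(dropout_rates[i]), color=colors[i])
plt.title('model accuracy vs dropout')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()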