@josiahcoad
Last active February 13, 2020 19:13
Keras NN
# Import TensorFlow
import tensorflow as tf
# Load and prepare the MNIST dataset.
# Convert the samples from integers to floating-point numbers:
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
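# As a quick sanity check, the prepared arrays should have the standard MNIST
# shapes (60,000 training and 10,000 test images of 28x28 pixels), with values
# scaled into [0, 1]:
print(x_train.shape, x_test.shape)   # (60000, 28, 28) (10000, 28, 28)
print(x_train.min(), x_train.max())  # 0.0 1.0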
# Build the tf.keras.Sequential model by stacking layers.
# Choose an optimizer and loss function for training:
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)
])
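# model.summary() gives a quick view of the stack: Flatten adds no parameters,
# the hidden Dense layer has 28*28*128 + 128 = 100,480 weights and biases, and
# the output Dense layer has 128*10 + 10 = 1,290, for 101,770 trainable
# parameters in total.
model.summary()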
# For each example the model returns a vector of "logits"
# or "log-odds" scores, one for each class.
predictions = model(x_train[:1]).numpy()
predictions
# array([[ 0.02841388, -0.3284333 , -0.450701  ,  0.0453613 , -0.40066364,
#         -0.763066  , -0.61733794,  0.32509428, -0.55119157, -0.9239696 ]],
#        dtype=float32)
# The tf.nn.softmax function converts these logits to "probabilities"
# for each class:
tf.nn.softmax(predictions).numpy()
# array([[0.13781358, 0.09645288, 0.08535227, 0.14016907, 0.08973172,
# 0.06245349, 0.07225128, 0.18541236, 0.07719205, 0.05317128]],
# dtype=float32)
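# The same numbers can be reproduced by hand: exponentiate the logits and
# normalize each row to sum to 1 (subtracting the row maximum first keeps the
# exponentials numerically stable and does not change the result).
import numpy as np

shifted = predictions - predictions.max(axis=1, keepdims=True)
manual_softmax = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
# manual_softmax matches tf.nn.softmax(predictions).numpy() up to float rounding.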
# Define a loss function for training: SparseCategoricalCrossentropy takes a
# vector of logits and a true class index, and returns a per-example loss.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])
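# Because the model is still untrained, its softmax outputs are close to
# uniform (about 1/10 per class), so the loss on a single example should be
# near -log(1/10), i.e. roughly 2.3:
loss_fn(y_train[:1], predictions).numpy()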
# The Model.fit method adjusts the model parameters to minimize the loss:
model.fit(x_train, y_train, epochs=5)
# Train on 60000 samples
# Epoch 1/5
# 60000/60000 - 4s 70us/sample - loss: 0.2927 - accuracy: 0.9141
# Epoch 2/5
# 60000/60000 - 4s 67us/sample - loss: 0.1425 - accuracy: 0.9580
# Epoch 3/5
# 60000/60000 - 4s 66us/sample - loss: 0.1059 - accuracy: 0.9679
# Epoch 4/5
# 60000/60000 - 4s 67us/sample - loss: 0.0876 - accuracy: 0.9730
# Epoch 5/5
# 60000/60000 - 4s 66us/sample - loss: 0.0747 - accuracy: 0.9771
# The Model.evaluate method checks the model's performance, usually on a
# "validation set" or "test set".
model.evaluate(x_test, y_test, verbose=2)
# 10000/10000 - 1s - loss: 0.0780 - accuracy: 0.9762
# If you want your model to return probabilities, you can wrap the trained
# model and attach the softmax to it:
probability_model = tf.keras.Sequential([
    model,
    tf.keras.layers.Softmax()
])
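# The wrapped model now returns a probability distribution over the ten digit
# classes for each input; taking the argmax gives the predicted digit. For
# example, on the first five test images:
probs = probability_model(x_test[:5])
probs                                 # shape (5, 10), each row sums to 1
tf.argmax(probs, axis=1)              # predicted digit for each of the 5 images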