from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
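# Optional: print a summary of the architecture (layer names, output shapes,
# parameter counts) to confirm the model was wired as intended
model.summary()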
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are Numpy arrays)
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255
y_train = y_train.astype('float32')
y_test = y_test.astype('float32')
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
model.compile(optimizer=keras.optimizers.RMSprop(),  # Optimizer
              # Loss function to minimize
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              # List of metrics to monitor
              metrics=['sparse_categorical_accuracy'])
print('# Fit model on training data')
history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=3,
                    # Pass validation data so validation loss and metrics
                    # are reported at the end of each epoch
                    validation_data=(x_val, y_val))
print('\nhistory dict:', history.history)
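# history.history maps each tracked name to a list with one value per epoch;
# the keys follow the compile() settings above, e.g. the loss curves:
print('per-epoch training loss:', history.history['loss'])
print('per-epoch validation loss:', history.history['val_loss'])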
# Evaluate the model on the test data using `evaluate`
print('\n# Evaluate on test data')
results = model.evaluate(x_test, y_test, batch_size=128)
print('test loss, test acc:', results)
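# `evaluate` returns the loss followed by each metric, in the order given to
# compile(); unpacking the list is an optional convenience (sketch):
test_loss, test_acc = results
print('unpacked -> loss: %.4f, accuracy: %.4f' % (test_loss, test_acc))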
print('\n# Generate a prediction for a single sample')
k = np.array(x_train[4545])
print(k.shape)
k = k.reshape(1, 784)  # reshape into a batch of one flattened image
# predict() returns an array of shape (1, 10) with one logit per class;
# the predicted class is the one with the highest value
predictions = model.predict(k)
print(predictions)
print('predictions shape:', predictions.shape)
print(np.argmax(predictions))  # index of the largest logit, i.e. the predicted digit
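# The last Dense layer has no activation and the loss used from_logits=True,
# so predict() returns raw logits; an optional sketch converting them to
# class probabilities with a softmax:
probabilities = tf.nn.softmax(predictions)
print('class probabilities:', probabilities.numpy())
print('predicted digit:', int(np.argmax(probabilities)))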