Keras Generation Example
import keras.backend as K
from keras.datasets import mnist
from keras.layers import Dense, Flatten, Input, Reshape
from keras.models import Model
from keras.optimizers import Adam
import numpy as np

# == This code trains a denoising autoencoder. ==
input_tensor = Input(shape = (28, 28))
flattened_input = Flatten()(input_tensor)
hidden_layer = Dense(28 * 28, activation = 'linear')
hidden_output = hidden_layer(flattened_input)
output_layer = Dense(28 * 28, activation = 'linear')
output_tensor = output_layer(hidden_output)
reshaped_tensor = Reshape((28, 28))(output_tensor)
encoder_training_model = Model([input_tensor], [reshaped_tensor])

LEARNING_RATE = 0.001
optimizer = Adam(lr = LEARNING_RATE)
encoder_training_model.compile(
  loss = 'mean_squared_error',
  optimizer = optimizer,
)

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_mean = np.mean(x_train)
x_stddev = np.std(x_train)

# Normalize x values.
x_train = (x_train - x_mean) / x_stddev
x_test = (x_test - x_mean) / x_stddev
encoder_training_model.fit(
  # Inject per-pixel Gaussian noise into the inputs; the targets are the
  # clean images, which is what makes this a *denoising* autoencoder.
  # (Validation uses clean inputs.)
  x_train + np.random.normal(size = x_train.shape),
  x_train,
  validation_data = (x_test, x_test),
  epochs = 2,
)
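
# As a quick sanity check (an illustrative sketch; these variable names are
# introduced here and are not part of the training flow), we can denoise one
# noisy test digit and look at its reconstruction error.
noisy_digit = np.expand_dims(x_test[0], axis = 0) + np.random.normal(size = (1, 28, 28))
denoised_digit = encoder_training_model.predict(noisy_digit)
print('reconstruction MSE on one test digit:', np.mean((denoised_digit[0] - x_test[0]) ** 2))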

# == Let's *encode* an image. ==
#
# This creates a model whose output is hidden_output: the 784 hidden
# activations for a given input image.
encoding_model = Model(input_tensor, hidden_output)
target_encoding = encoding_model.predict(
  # Add a leading dimension of size 1 to make this a batch of one image.
  np.expand_dims(x_train[0, :, :], axis = 0)
).reshape((28 * 28))
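
# To round-trip this encoding back to pixels (a sketch; encoding_input and
# decoding_model are names introduced here), we can reuse the trained
# output_layer on a fresh Input of the encoding's shape.
encoding_input = Input(shape = (28 * 28,))
decoding_model = Model(encoding_input, Reshape((28, 28))(output_layer(encoding_input)))
decoded_digit = decoding_model.predict(np.expand_dims(target_encoding, axis = 0))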

# == Let's *generate* an image. ==
#
# Our input will be a dummy input.
dummy_input_tensor = Input(shape = (1,))
# We'll use a dummy dense layer; its weights (one per pixel) will encode
# the generated image.
image_layer = Dense(
  28 * 28, # num pixels
  activation = 'linear', # i.e. no nonlinearity
  use_bias = False,
)
image_tensor = image_layer(dummy_input_tensor)
# We'll make a new Model, reusing the trained hidden_layer from the
# autoencoder.
generator_hidden_output = hidden_layer(image_tensor)
generator_model = Model(dummy_input_tensor, generator_hidden_output)

# Freeze everything except the image layer, so only the "image" weights
# get trained.
for layer in generator_model.layers:
  layer.trainable = False
image_layer.trainable = True

LEARNING_RATE = 0.01
optimizer = Adam(lr = LEARNING_RATE)
generator_model.compile(
  loss = 'mean_squared_error',
  optimizer = optimizer,
)
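
# Illustrative check (not part of the original flow): after freezing, the only
# trainable weights should be the image layer's (1, 784) kernel.
print([K.int_shape(w) for w in generator_model.trainable_weights])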

# This trains the image layer's weights so that the generator reproduces
# the target hidden encoding.
generator_model.fit(
  np.ones((1, 1)),
  np.expand_dims(target_encoding, axis = 0),
  epochs = 1000,
)
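
# How close did we get? (Illustrative check; achieved_encoding is a name
# introduced here.)
achieved_encoding = generator_model.predict(np.ones((1, 1))).reshape((28 * 28,))
print('encoding MSE after generator training:', np.mean((achieved_encoding - target_encoding) ** 2))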

# Extract the generated image: it lives in the image layer's weight matrix.
generated_image_data = K.eval(image_layer.weights[0])

# Save the generated image.
def save_image(fname, data):
  from PIL import Image
  # Reshape flat data (e.g. the (1, 784) weight matrix) into a 28x28 image.
  data = data.reshape((28, 28))
  # Undo the normalization.
  data = (data * x_stddev) + x_mean
  # Keep values in the valid 8-bit range.
  data = np.clip(data, 0, 255)
  # Convert to bytes.
  data = data.astype(np.uint8)
  img = Image.fromarray(data)
  img.save(fname)

save_image('output.png', generated_image_data)
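
# For comparison, one might also save the digit that was originally encoded
# (x_train[0] is already normalized, so save_image undoes that too).
save_image('original.png', x_train[0])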