Created
January 25, 2020 20:43
-
-
Save TheBojda/a7aaaec640e67576bcd0e82a7a6412b3 to your computer and use it in GitHub Desktop.
MNIST GAN Example
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# MNIST GAN example
# based on https://www.tensorflow.org/tutorials/generative/dcgan
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers, models
import time

BUFFER_SIZE = 60000  # full MNIST training-set size, used as the shuffle buffer
BATCH_SIZE = 300     # 60000 / 300 = 200 full batches per epoch

# Only the training images are needed for the GAN; labels and the test
# split are discarded.
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()

# Add a channel axis and rescale pixel values from [0, 255] to [-1, 1]
# so real images match the range of the generator's tanh output.
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5

# Shuffled, batched input pipeline over the real images.
train_dataset = (
    tf.data.Dataset.from_tensor_slices(train_images)
    .shuffle(BUFFER_SIZE)
    .batch(BATCH_SIZE)
)
# Discriminator: a small strided CNN that maps a 28x28x1 image to a single
# real/fake logit. No final sigmoid — the loss below uses from_logits=True.
discriminator = models.Sequential()
discriminator.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                                input_shape=[28, 28, 1]))
discriminator.add(layers.LeakyReLU())
discriminator.add(layers.Dropout(0.3))
discriminator.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
discriminator.add(layers.LeakyReLU())
discriminator.add(layers.Dropout(0.3))
discriminator.add(layers.Flatten())
discriminator.add(layers.Dense(1))

# Compiled standalone so it can be trained directly on real/fake batches.
discriminator.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(1e-4),
)
# Generator: maps a 100-dim latent vector to a 28x28x1 image via a dense
# projection to 7x7x256 followed by transposed convolutions (7 -> 7 -> 14 -> 28).
# The final tanh keeps outputs in [-1, 1], matching the normalized real images.
generator = models.Sequential()
generator.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Reshape((7, 7, 256)))
generator.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1),
                                     padding='same', use_bias=False))
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False))
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False, activation='tanh'))

# Freeze the discriminator *before* compiling the combined model: the
# already-compiled standalone discriminator keeps training, but inside
# `gan` only the generator's weights are updated.
discriminator.trainable = False

# Stacked model: latent noise -> generator -> (frozen) discriminator logit.
gan = models.Sequential()
gan.add(generator)
gan.add(discriminator)
gan.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(1e-4),
)
EPOCHS = 50
noise_dim = 100  # length of the generator's latent input vector (matches input_shape=(100,))
num_examples_to_generate = 16  # rendered as a 4x4 preview grid each epoch
# Fixed latent seed so the per-epoch preview shows the same samples evolving.
seed = tf.random.normal([num_examples_to_generate, noise_dim])
def train_step(images):
    """Run one adversarial training step on a single batch of real images.

    First trains the standalone discriminator on the real batch (label 1)
    and on a freshly generated fake batch (label 0), then trains the
    generator through the combined `gan` model (discriminator frozen there)
    toward making fakes be classified as real.

    Args:
        images: a batch of real images shaped (batch, 28, 28, 1) in [-1, 1].
    """
    # Use the actual batch size rather than the global BATCH_SIZE so a
    # final partial batch (possible if BATCH_SIZE does not divide the
    # dataset size) does not cause a label/input shape mismatch.
    n = len(images)
    noise = tf.random.normal([n, noise_dim])
    # predict() runs the generator in inference mode for the fakes fed to
    # the discriminator, matching the original behavior.
    fake_images = generator.predict(noise)

    # Discriminator: real -> 1, fake -> 0.
    discriminator.train_on_batch(images, np.ones((n, 1)))
    discriminator.train_on_batch(fake_images, np.zeros((n, 1)))

    # Generator (via the frozen-discriminator gan): fake -> "real" (1).
    gan.train_on_batch(noise, np.ones((n, 1)))
# Main loop: one full pass over the dataset per epoch, then render a 4x4
# grid of generator samples from the fixed seed to visualize progress.
for epoch in range(EPOCHS):
    start = time.time()

    for image_batch in train_dataset:
        train_step(image_batch)

    # training=False: BatchNorm uses its moving statistics for the preview.
    predictions = generator(seed, training=False)
    plt.figure(figsize=(4, 4))
    for idx in range(predictions.shape[0]):
        plt.subplot(4, 4, idx + 1)
        # Undo the [-1, 1] normalization back to grayscale pixel values.
        plt.imshow(predictions[idx, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.show()

    print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment