MNIST GAN Example
# MNIST GAN example
# based on https://www.tensorflow.org/tutorials/generative/dcgan

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers, models
import time

BUFFER_SIZE = 60000
BATCH_SIZE = 256

# Load the MNIST training images, add a channel dimension and scale to [-1, 1]
# to match the tanh output of the generator.
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5  # Normalize the images to [-1, 1]

train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
# Discriminator: a small CNN that maps a 28x28x1 image to a single
# real/fake logit (no sigmoid, since the loss below uses from_logits=True).
discriminator = models.Sequential([
    layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]),
    layers.LeakyReLU(),
    layers.Dropout(0.3),
    layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
    layers.LeakyReLU(),
    layers.Dropout(0.3),
    layers.Flatten(),
    layers.Dense(1)
])
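
# Quick sanity check (not in the original gist): the untrained discriminator
# maps a batch of images to one logit per image, here a tensor of shape (1, 1).
print(discriminator(tf.zeros([1, 28, 28, 1]), training=False).shape)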

# Generator: maps a 100-dimensional noise vector to a 28x28x1 image in [-1, 1]
# by upsampling a 7x7x256 feature map with transposed convolutions.
generator = models.Sequential([
    layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)),
    layers.BatchNormalization(),
    layers.LeakyReLU(),
    layers.Reshape((7, 7, 256)),
    layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False),
    layers.BatchNormalization(),
    layers.LeakyReLU(),
    layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False),
    layers.BatchNormalization(),
    layers.LeakyReLU(),
    layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
])
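
# Quick sanity check (not in the original gist): the untrained generator turns
# a single noise vector into a 28x28 grayscale image; `sample` is only used here.
sample = generator(tf.random.normal([1, 100]), training=False)
print(sample.shape)  # expected: (1, 28, 28, 1)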

# Both losses are computed on raw logits, hence from_logits=True.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

# The discriminator should output 1 for real images and 0 for generated ones.
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

# The generator is rewarded when the discriminator labels its images as real (1).
def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16

# Fixed noise vectors, reused after every epoch so progress is visible on the same samples.
seed = tf.random.normal([num_examples_to_generate, noise_dim])

# One training step: update both networks from a single batch of real images.
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

for epoch in range(EPOCHS):
    start = time.time()

    for image_batch in train_dataset:
        train_step(image_batch)

    # After every epoch, render the same 16 fixed noise vectors to track progress.
    predictions = generator(seed, training=False)
    plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.show()

    print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
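
# Optionally persist the trained generator for later sampling (not part of the
# original gist; the file name is just an example). The saved model can be
# reloaded with tf.keras.models.load_model('mnist_gan_generator.h5').
generator.save('mnist_gan_generator.h5')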