@aarora4
Created August 2, 2021 20:45
Simplest DCGAN Implementation in Python
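A self-contained tf.keras DCGAN that trains on 64x64 images loaded from ./images/images/, periodically saves preview grids to the dcgan-images-2 folder, and writes the trained generator, discriminator, and combined model to .h5 files.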
import glob
import os
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Load the training images and resize them to 64x64
train_images_art = []
for directory in glob.glob('./images/images/*'):
    for filename in glob.glob(directory + '/*'):
        image = cv2.imread(filename)
        image = cv2.resize(image, (64, 64))
        image = tf.keras.preprocessing.image.img_to_array(image)
        train_images_art.append(image)
train_images_art = np.array(train_images_art, dtype="float")
# Scale pixel values to [-1, 1] to match the generator's tanh output
train_images_art = (train_images_art - 127.5) / 127.5
# The dimension of the noise vector z
noise_dim = 100
batch_size = 16
steps_per_epoch = 548
epochs = 100
channels = 3
save_path = 'dcgan-images-2'
os.makedirs(save_path, exist_ok=True)  # make sure the preview output directory exists
def create_generator():
    generator = tf.keras.Sequential()
    # Starting size
    d = 4
    generator.add(tf.keras.layers.Dense(d*d*256, kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02), input_dim=noise_dim))
    generator.add(tf.keras.layers.LeakyReLU(0.2))
    # 4x4x256
    generator.add(tf.keras.layers.Reshape((d, d, 256)))
    # 8x8x128
    generator.add(tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    generator.add(tf.keras.layers.LeakyReLU(0.2))
    # 16x16x128
    generator.add(tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    generator.add(tf.keras.layers.LeakyReLU(0.2))
    # 32x32x128
    generator.add(tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    generator.add(tf.keras.layers.LeakyReLU(0.2))
    # 64x64x128
    generator.add(tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    generator.add(tf.keras.layers.LeakyReLU(0.2))
    # 64x64x3
    generator.add(tf.keras.layers.Conv2D(3, (3, 3), padding='same', activation='tanh', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    generator.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.0002, 0.5))
    return generator
def create_discriminator():
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Conv2D(64, (3, 3), padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02), input_shape=(64, 64, 3)))
    discriminator.add(tf.keras.layers.LeakyReLU(0.2))
    discriminator.add(tf.keras.layers.Conv2D(128, (3, 3), strides=2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    discriminator.add(tf.keras.layers.LeakyReLU(0.2))
    discriminator.add(tf.keras.layers.Conv2D(128, (3, 3), strides=2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    discriminator.add(tf.keras.layers.LeakyReLU(0.2))
    discriminator.add(tf.keras.layers.Conv2D(256, (3, 3), strides=2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.02)))
    discriminator.add(tf.keras.layers.LeakyReLU(0.2))
    discriminator.add(tf.keras.layers.Flatten())
    discriminator.add(tf.keras.layers.Dropout(0.4))
    discriminator.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.0002, 0.5))
    return discriminator
discriminator = create_discriminator()
generator = create_generator()
# Freeze the discriminator inside the combined model; it was compiled above,
# so discriminator.train_on_batch still updates its weights in the training loop
discriminator.trainable = False
# Link the two models to create the GAN
gan_input = tf.keras.Input(shape=(noise_dim,))
fake_image = generator(gan_input)
gan_output = discriminator(fake_image)
gan = tf.keras.Model(gan_input, gan_output)
gan.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.0002, 0.5))
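As an optional sanity check before training (a minimal sketch, not part of the original gist; test_noise and test_image are illustrative names), a single forward pass through the generator should produce one 64x64x3 image with values in [-1, 1]:
# Optional sanity check: one forward pass should yield a (1, 64, 64, 3) image in [-1, 1]
test_noise = np.random.normal(0, 1, size=(1, noise_dim))
test_image = generator.predict(test_noise)
print(test_image.shape, test_image.min(), test_image.max())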
# Display a grid of generated images, and save them if the epoch number is specified
def show_images(noise, epoch=None):
    generated_images = generator.predict(noise)
    plt.figure(figsize=(10, 10))
    for i, image in enumerate(generated_images):
        plt.subplot(10, 10, i+1)
        if channels == 1:
            plt.imshow(np.clip(image.reshape((64, 64)), 0.0, 1.0), cmap='gray')
        else:
            # Map the tanh output from [-1, 1] back to [0, 1] for display
            image = ((image + 1) / 2)
            plt.imshow(np.clip(image.reshape((64, 64, channels)), 0.0, 1.0))
        plt.axis('off')
    plt.tight_layout()
    if epoch is not None:
        plt.savefig(f'{save_path}/gan-images_epoch-{epoch}.png')
# Constant noise for viewing how the GAN progresses
static_noise = np.random.normal(0, 1, size=(100, noise_dim))
# Training loop
temp_epochs = 50
for epoch in range(temp_epochs):
    for batch in range(steps_per_epoch):
        # Train the discriminator on a mix of real and generated images
        noise = np.random.normal(0, 1, size=(batch_size, noise_dim))
        real_x = train_images_art[np.random.randint(0, train_images_art.shape[0], size=batch_size)]
        fake_x = generator.predict(noise)
        x = np.concatenate((real_x, fake_x))
        disc_y = np.zeros(2*batch_size)
        disc_y[:batch_size] = 0.9  # one-sided label smoothing for the real images
        d_loss = discriminator.train_on_batch(x, disc_y)
        # Train the generator through the combined model, where the discriminator is frozen
        y_gen = np.ones(batch_size)
        g_loss = gan.train_on_batch(noise, y_gen)
    print(f'Epoch: {epoch} \t Discriminator Loss: {d_loss} \t\t Generator Loss: {g_loss}')
    if epoch % 2 == 0:
        show_images(static_noise, epoch)
discriminator.save('dcdiscriminator.h5')
generator.save('dcgenerator.h5')
gan.save('dcgan.h5')
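To reuse the saved weights later (a minimal sketch, assuming the .h5 files above were written and noise_dim is still 100; loaded_generator, sample_noise, and samples are illustrative names), the generator can be reloaded on its own and sampled directly:
loaded_generator = tf.keras.models.load_model('dcgenerator.h5')
sample_noise = np.random.normal(0, 1, size=(16, noise_dim))
samples = loaded_generator.predict(sample_noise)
samples = (samples + 1) / 2  # map tanh output from [-1, 1] back to [0, 1]
plt.figure(figsize=(4, 4))
for i, image in enumerate(samples):
    plt.subplot(4, 4, i + 1)
    plt.imshow(np.clip(image, 0.0, 1.0))
    plt.axis('off')
plt.tight_layout()
plt.show()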