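# Relativistic average GAN (RaGAN) with a DCGAN-style generator and
# discriminator, trained on 6442 frames of 128x128 RGB data stored as
# separate raw grayscale planes (r.bin / g.bin / b.bin).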
import IO
import numpy as np
from keras import models, layers, optimizers, initializers
import keras.backend as K
batch_size = 32
r = IO.LoadRawBinaryGrayscaleSequence('r.bin', 128, 128, 6442) # range: [0.0, 1.0]
g = IO.LoadRawBinaryGrayscaleSequence('g.bin', 128, 128, 6442) # range: [0.0, 1.0]
b = IO.LoadRawBinaryGrayscaleSequence('b.bin', 128, 128, 6442) # range: [0.0, 1.0]
def GetGenerator():
    # DCGAN-style generator: latent vector (128,) -> 128x128x3 image in [-1, 1].
    Input = layers.Input((128,))
    x = layers.Reshape((1, 1, 128))(Input)
    # 1x1 -> 4x4
    x = layers.Conv2DTranspose(1024, (4, 4), strides=1, padding='valid', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.Activation('relu')(x)
    # 4x4 -> 8x8
    x = layers.Conv2DTranspose(512, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.Activation('relu')(x)
    # 8x8 -> 16x16
    x = layers.Conv2DTranspose(256, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.Activation('relu')(x)
    # 16x16 -> 32x32
    x = layers.Conv2DTranspose(128, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.Activation('relu')(x)
    # 32x32 -> 64x64
    x = layers.Conv2DTranspose(64, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.Activation('relu')(x)
    # 64x64 -> 128x128, 3 channels; tanh matches the [-1, 1] data range
    x = layers.Conv2DTranspose(3, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.Activation('tanh')(x)
    return models.Model(Input, x)
def GetDiscriminator():
    # DCGAN-style discriminator/critic: 128x128x3 image -> scalar rating.
    # No final sigmoid; the relativistic heads below apply it after subtraction.
    Input = layers.Input((128, 128, 3))
    # 128x128 -> 64x64 (no BatchNorm on the first block, per DCGAN practice)
    x = layers.Conv2D(64, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(Input)
    x = layers.LeakyReLU(0.2)(x)
    # 64x64 -> 32x32
    x = layers.Conv2D(128, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.LeakyReLU(0.2)(x)
    # 32x32 -> 16x16
    x = layers.Conv2D(256, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.LeakyReLU(0.2)(x)
    # 16x16 -> 8x8
    x = layers.Conv2D(512, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.LeakyReLU(0.2)(x)
    # 8x8 -> 4x4
    x = layers.Conv2D(1024, (4, 4), strides=2, padding='same', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.BatchNormalization(axis=3, epsilon=1e-5, momentum=0.1)(x)
    x = layers.LeakyReLU(0.2)(x)
    # 4x4 -> 1x1 -> flat scalar rating
    x = layers.Conv2D(1, (4, 4), strides=1, padding='valid', use_bias=False, kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    x = layers.Reshape((1,))(x)
    return models.Model(Input, x)
def build_discriminator_model(g, d):
    # Combined model for the discriminator update: G is frozen, D is trainable.
    g.trainable = False
    d.trainable = True
    real_img = layers.Input(shape=(128, 128, 3))
    latent = layers.Input(shape=(128,))
    fake_img = g(latent)
    real_rating = d(real_img)
    fake_rating = d(fake_img)
    # Relativistic average heads: compare each rating against the mean rating
    # of the opposite batch (K.mean over axis 0 averages within the batch).
    average_fake_rating = layers.Lambda(lambda x: K.mean(x, axis=0))(fake_rating)
    average_real_rating = layers.Lambda(lambda x: K.mean(x, axis=0))(real_rating)
    relative_real_rating = layers.subtract([real_rating, average_fake_rating])
    relative_real_rating = layers.Activation('sigmoid')(relative_real_rating)
    relative_fake_rating = layers.subtract([fake_rating, average_real_rating])
    relative_fake_rating = layers.Activation('sigmoid')(relative_fake_rating)
    discriminator = models.Model([real_img, latent], [relative_real_rating, relative_fake_rating])
    discriminator.compile(loss=['binary_crossentropy', 'binary_crossentropy'], optimizer=optimizers.Adam(lr=2e-4, beta_1=0.5, beta_2=0.999))
    return discriminator
def build_generator_model(g, d):
    # Combined model for the generator update: D is frozen, G is trainable.
    g.trainable = True
    d.trainable = False
    real_img = layers.Input(shape=(128, 128, 3))
    latent = layers.Input(shape=(128,))
    fake_img = g(latent)
    real_rating = d(real_img)
    fake_rating = d(fake_img)
    average_fake_rating = layers.Lambda(lambda x: K.mean(x, axis=0))(fake_rating)
    average_real_rating = layers.Lambda(lambda x: K.mean(x, axis=0))(real_rating)
    relative_fake_rating = layers.subtract([fake_rating, average_real_rating])
    relative_fake_rating = layers.Activation('sigmoid')(relative_fake_rating)
    relative_real_rating = layers.subtract([real_rating, average_fake_rating])
    relative_real_rating = layers.Activation('sigmoid')(relative_real_rating)
    # Note the swapped output order relative to the discriminator model: with
    # targets [1, 0], the generator is pushed to make fakes rate above the
    # average real and reals below the average fake.
    generator = models.Model([real_img, latent], [relative_fake_rating, relative_real_rating])
    generator.compile(loss=['binary_crossentropy', 'binary_crossentropy'], optimizer=optimizers.Adam(lr=2e-4, beta_1=0.5, beta_2=0.999))
    return generator
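# Both combined models above implement the relativistic average GAN (RaGAN)
# objective (Jolicoeur-Martineau, 2018, "The relativistic discriminator"):
#     D_ra(x_real) = sigmoid(C(x_real) - mean_batch(C(x_fake)))
#     D_ra(x_fake) = sigmoid(C(x_fake) - mean_batch(C(x_real)))
# where C is the raw, unbounded critic from GetDiscriminator. The
# discriminator update drives D_ra(x_real) -> 1 and D_ra(x_fake) -> 0;
# the generator update drives the reverse.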
def get_batch():
    # Sample batch_size random frames and recombine the separately stored
    # r/g/b planes into one RGB batch.
    indx = [np.random.randint(0, 6442) for _ in range(batch_size)]
    batch = np.zeros((batch_size, 128, 128, 3), dtype='float32')
    for x in range(batch_size):
        batch[x, :, :, 0] = r[indx[x], :, :, 0]
        batch[x, :, :, 1] = g[indx[x], :, :, 0]
        batch[x, :, :, 2] = b[indx[x], :, :, 0]
    return 2 * (batch - 0.5)  # rescale [0, 1] -> [-1, 1] to match tanh
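# A vectorized equivalent (a sketch; assumes r, g and b behave as numpy
# arrays of shape (6442, 128, 128, 1), as the indexing above implies):
def get_batch_vectorized():
    idx = np.random.randint(0, 6442, size=batch_size)
    # Fancy indexing picks the frames; concatenating the single-channel
    # planes along the last axis yields (batch_size, 128, 128, 3).
    batch = np.concatenate([r[idx], g[idx], b[idx]], axis=-1).astype('float32')
    return 2 * (batch - 0.5)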
raw_g = GetGenerator()
raw_d = GetDiscriminator()
G = build_generator_model(raw_g, raw_d)
D = build_discriminator_model(raw_g, raw_d)
def train(x):
    # Targets: first output -> 1, second output -> 0. The two combined models
    # order their outputs so the same targets mean opposite things for G and D.
    dist_real = np.ones((batch_size, 1))
    dist_fake = np.zeros((batch_size, 1))
    # One discriminator step, then one generator step on a fresh batch.
    real_img = get_batch()
    latent = np.random.normal(0, 1, (batch_size, 128))
    d_loss = D.train_on_batch([real_img, latent], [dist_real, dist_fake])
    real_img = get_batch()
    latent = np.random.normal(0, 1, (batch_size, 128))
    g_loss = G.train_on_batch([real_img, latent], [dist_real, dist_fake])
    print("%d [D loss: %f] [G loss: %f]" % (x, d_loss[0], g_loss[0]))
G.summary()
D.summary()
# Train effectively forever, checkpointing both networks every 500 steps.
for x in range(10000000000):
    for y in range(500):
        train(x * 500 + y)
    raw_g.save_weights('GW/G' + str(x) + '.h5')
    raw_d.save_weights('DW/D' + str(x) + '.h5')
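# Minimal inference sketch, not part of the training run above (the loop is
# effectively infinite, so run this separately against a saved checkpoint;
# 'GW/G0.h5' is a hypothetical name following the save pattern above):
def sample_images(weights_path='GW/G0.h5', n=16):
    gen = GetGenerator()
    gen.load_weights(weights_path)
    z = np.random.normal(0, 1, (n, 128))
    imgs = gen.predict(z)                        # tanh output in [-1, 1]
    return ((imgs + 1) * 127.5).astype('uint8')  # back to [0, 255] RGB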