@jfsantos · Created May 6, 2016
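# Minimal Keras 1.x + Theano sanity check for a GAN-style setup: a generator
# (modelA) and a discriminator (modelB) stacked into a combined model (modelC),
# trained on random noise and dummy targets. The layer names used below
# (Convolution2D, subsample, border_mode) follow the Keras 1.x API this gist
# was written against.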
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.core import Reshape, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Convolution2D
from keras.optimizers import Adam
import numpy as np

import theano
# Verbose error reports and fast compilation make Theano debugging easier.
theano.config.exception_verbosity = 'high'
theano.config.optimizer = 'fast_compile'
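
# Generator: map a 100-dim noise vector to a flat 3*64*64 "image".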
modelA = Sequential()
modelA.add(Dense(3*64*64, input_shape=(100,)))
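
# Discriminator: reshape the flat vector back to (3, 64, 64), apply a strided
# convolution, and output a single real/fake probability.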
modelB = Sequential()
modelB.add(Reshape((3, 64, 64), input_shape=(3*64*64,)))
modelB.add(Convolution2D(256, 5, 5, subsample=(2, 2), border_mode='same'))
modelB.add(BatchNormalization())
modelB.add(LeakyReLU(0.2))
modelB.add(Flatten())
modelB.add(Dense(1, activation='sigmoid'))
# Freeze the discriminator while it is stacked under the generator, so that
# generator updates through modelC do not touch modelB's weights. In Keras 1.x
# the trainable flag is read at compile time, so modelB must stay frozen until
# modelC has been compiled below.
modelB.trainable = False
modelC = Sequential()
modelC.add(modelA)
modelC.add(modelB)
# Dummy targets: all ones, just enough to smoke-test both training paths.
y = np.ones(100)
print('Compiling models')
adam = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
modelA.compile(loss='binary_crossentropy', optimizer=adam)
modelC.compile(loss='binary_crossentropy', optimizer=adam)
# Unfreeze the discriminator so its own training updates take effect.
modelB.trainable = True
modelB.compile(loss='binary_crossentropy', optimizer=adam)
print('Training')
for batch_id in range(10):
    target = y[10 * batch_id:10 * (batch_id + 1)]
    noise = np.random.randn(10, 100)
    # Generate a batch of fake samples from the current generator.
    processed = modelA.predict(noise)

    print('Training discriminator')
    d_loss = modelB.train_on_batch(processed, target)
    print('d_loss={}'.format(d_loss))

    print('Training generator')
    g_loss = modelC.train_on_batch(noise, target)
    print('g_loss={}'.format(g_loss))