import os
import numpy as np
np.random.seed(os.getpid()) # originally seed 1337, for reproducibility
from tqdm import tqdm
from tqdm import trange
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
###################################################################################################
from keras.datasets import mnist, cifar10
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Activation, Flatten, Reshape, ActivityRegularization, Dropout, Dot
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers.merge import Add, Concatenate, Multiply
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import regularizers
from keras import initializers
from keras.initializers import RandomNormal, Constant
from keras.callbacks import Callback
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam
from keras.utils import np_utils
import keras.backend as K
from keras.engine.topology import Layer
from keras.applications.vgg16 import VGG16
#org_model = VGG16(weights='imagenet')
org_model = VGG16(weights=None)
org_model.load_weights('vgg16.weights.h5')
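# Note (added): 'vgg16.weights.h5' is assumed to be a local copy of the ImageNet weights;
# as the commented line above suggests, VGG16(weights='imagenet') would download them instead.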
# assumption: org_model is the Keras VGG16 model
def create_base_model( org_model, input_shape ):
    base_model = Sequential()

    l = org_model.get_layer('block1_conv1')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block1_conv1',
                            input_shape=input_shape ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block1_conv2')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block1_conv2' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block1_pool')
    base_model.add( MaxPooling2D(pool_size=l.pool_size, padding=l.padding, strides=l.strides,
                                 name='my_block1_pool' ) )

    l = org_model.get_layer('block2_conv1')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block2_conv1' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block2_conv2')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block2_conv2' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block2_pool')
    base_model.add( MaxPooling2D(pool_size=l.pool_size, padding=l.padding, strides=l.strides,
                                 name='my_block2_pool' ) )

    l = org_model.get_layer('block3_conv1')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block3_conv1' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block3_conv2')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block3_conv2' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block3_pool')
    base_model.add( MaxPooling2D(pool_size=l.pool_size, padding=l.padding, strides=l.strides,
                                 name='my_block3_pool' ) )

    l = org_model.get_layer('block4_conv1')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block4_conv1' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block4_conv2')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block4_conv2' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block4_pool')
    base_model.add( MaxPooling2D(pool_size=l.pool_size, padding=l.padding, strides=l.strides,
                                 name='my_block4_pool' ) )

    l = org_model.get_layer('block5_conv1')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block5_conv1' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block5_conv2')
    base_model.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                            use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                            name='my_block5_conv2' ) )
    base_model.layers[-1].set_weights( l.get_weights() )

    l = org_model.get_layer('block5_pool')
    base_model.add( MaxPooling2D(pool_size=l.pool_size, padding=l.padding, strides=l.strides,
                                 name='my_block5_pool' ) )

    for l in base_model.layers:
        l.trainable = False

    base_model.compile( loss='mean_squared_error', optimizer='adam' )
    return base_model
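# A hedged alternative (added, not in the original gist): the layer-by-layer copy above can also
# be written as a loop over the same subset of VGG16 layer names (conv1/conv2 and pool of each
# block); `create_base_model_loop` is illustrative only.
def create_base_model_loop( org_model, input_shape ):
    names = ['block1_conv1', 'block1_conv2', 'block1_pool',
             'block2_conv1', 'block2_conv2', 'block2_pool',
             'block3_conv1', 'block3_conv2', 'block3_pool',
             'block4_conv1', 'block4_conv2', 'block4_pool',
             'block5_conv1', 'block5_conv2', 'block5_pool']
    m = Sequential()
    for i, name in enumerate(names):
        l = org_model.get_layer(name)
        kwargs = {'input_shape': input_shape} if i == 0 else {}
        if name.endswith('pool'):
            m.add( MaxPooling2D(pool_size=l.pool_size, padding=l.padding, strides=l.strides,
                                name='my_' + name, **kwargs) )
        else:
            m.add( Conv2D( l.filters, l.kernel_size, strides=l.strides, padding=l.padding,
                           use_bias=l.use_bias, data_format=l.data_format, activation=l.activation,
                           name='my_' + name, **kwargs) )
            m.layers[-1].set_weights( l.get_weights() )
    for l in m.layers:
        l.trainable = False
    m.compile( loss='mean_squared_error', optimizer='adam' )
    return m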
##########################################################################################
# style part
def create_blockstyle( base_model, layer_name, input_shape ):
    # sub-model exposing the feature maps of the requested layer
    mod = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_name).output)
    oshape = K.int_shape(mod.output)
    w,c = oshape[1],oshape[-1]   # spatial size (assumed square) and channel count

    x = Input( shape=input_shape )
    y = mod(x)
    y = Reshape( (w*w, c) )(y)   # flatten the spatial positions: (w*w, c)
    z = Dot( 1 )( [y, y] )       # Gram matrix of the feature maps: (c, c)
    z = Reshape((c,c,1))(z)
    z = Conv2D( 1, (1,1), use_bias=False, kernel_initializer=Constant(1.0/(c*w)))(z)  # fixed 1/(c*w) scaling

    smod = Model( inputs=[x], outputs=[z] )
    for l in smod.layers:
        l.trainable = False
    return smod
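# For reference (added, not part of the original gist): the Reshape/Dot/Conv2D chain above is a
# scaled Gram matrix of the layer's feature maps. A plain NumPy equivalent, assuming a square
# w x w feature map with c channels:
def numpy_gram( features, w, c ):
    F = features.reshape( w*w, c )     # rows = spatial positions, columns = channels
    return F.T.dot(F) / (c*w)          # same 1/(c*w) scaling as the constant 1x1 convolution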
def create_style_mod( base_model, input_shape ):
    input_s = Input( shape=input_shape )

    s1 = create_blockstyle( base_model, 'my_block1_conv1', input_shape )(input_s)
    s1 = Flatten()(s1)

    s2 = create_blockstyle( base_model, 'my_block2_conv1', input_shape )(input_s)
    s2 = Flatten()(s2)

    s3 = create_blockstyle( base_model, 'my_block3_conv1', input_shape )(input_s)
    s3 = Flatten()(s3)

    output_s = Concatenate()([ s1, s2, s3 ])

    style_mod = Model(inputs=input_s, outputs=output_s)
    print( style_mod.summary() )
    return style_mod
##########################################################################################
# content part
def create_content_mod( base_model, input_shape ):
    input_c = Input( shape=input_shape )

    m4 = Model(inputs=base_model.input, outputs=base_model.get_layer('my_block4_conv2').output)
    c4 = m4(input_c)
    c4 = Flatten()(c4)

    m5 = Model(inputs=base_model.input, outputs=base_model.get_layer('my_block5_conv2').output)
    c5 = m5(input_c)
    c5 = Flatten()(c5)

    output_c = Concatenate()([ c4, c5 ])

    m4.trainable = False
    m5.trainable = False

    content_mod = Model(inputs=input_c, outputs=output_c)
    return content_mod
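# Optional check (added, not part of the original gist): the two content branches are taken
# straight from my_block4_conv2 and my_block5_conv2; their feature-map shapes can be inspected
# before fitting, e.g.
#   print( base_model.get_layer('my_block4_conv2').output_shape )
#   print( base_model.get_layer('my_block5_conv2').output_shape )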
##########################################################################################
# merge model
def create_merge_model( input_shape, style_mod, content_mod ):
    input_m = Input(shape=(1,))

    # the synthesised image is stored in the weights of this Dense layer;
    # feeding a constant 1 makes its output exactly those weights
    W,H,C = input_shape[0],input_shape[1],input_shape[2]
    x = Dense(W*H*C, use_bias=False )(input_m)
    x = Reshape((W,H,C))(x)

    s_x = style_mod(x)
    c_x = content_mod(x)

    mod = Model(inputs=[input_m], outputs=[s_x,c_x])
    style_mod.trainable = False
    content_mod.trainable = False
    mod.compile(loss=['mean_squared_error', 'mean_squared_error'],
                loss_weights=[1., 10.],
                optimizer=Adam(lr=1e-2, decay=1e-4, beta_1=0.5))
    print(mod.summary())
    return mod
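# Note (added): with a constant input of 1, the Dense layer's weight matrix of shape (1, W*H*C)
# *is* the image being synthesised, so fit() in the main script optimises that single weight
# tensor by gradient descent against the style and content targets. A hedged variation (not done
# in this gist) is to seed those weights with the content image, e.g.
#   mod.layers[1].set_weights([ C.reshape(1, -1) ])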
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from PIL import Image
import argparse
def compute_mean_var( I ):
    # per-channel mean and (clipped) standard deviation of an (H, W, 3) image
    s, s2 = np.zeros(3, dtype=np.float64), np.zeros(3, dtype=np.float64)
    for x in range(I.shape[0]):
        for y in range(I.shape[1]):
            for c in range(3):
                s[c]  = s[c]  + I[x,y,c]
                s2[c] = s2[c] + I[x,y,c]*I[x,y,c]
    w = 1.0 / (I.shape[0] * I.shape[1])
    for c in range(3):
        s[c]  = s[c]*w
        s2[c] = np.sqrt( np.max([ 0.02, s2[c]*w - s[c]*s[c] ]) )
    return s,s2
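# Vectorised equivalent (added, not in the original gist): the same per-channel statistics
# can be obtained directly with NumPy.
def compute_mean_var_np( I ):
    P  = I.reshape(-1, 3)
    mu = P.mean(axis=0)
    sd = np.sqrt( np.maximum(0.02, P.var(axis=0)) )
    return mu, sd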
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--style' , type=str)
    parser.add_argument('--content', type=str)
    args = parser.parse_args()

    W = 600

    content_img = image.load_img(args.content, target_size=(W,W))
    C = image.img_to_array(content_img)
    C = np.expand_dims(C, axis=0)
    C = C/255.0
    c_mu, c_sd = compute_mean_var( C[0] )   # drop the batch axis before computing statistics

    style_img = image.load_img(args.style, target_size=(W,W))
    S = image.img_to_array(style_img)
    S = np.expand_dims(S, axis=0)
    S = S/255.0

    input_shape = (W,W,3)
    base_model = create_base_model( org_model, input_shape )
    style_mod = create_style_mod( base_model, input_shape )
    content_mod = create_content_mod( base_model, input_shape )
    mod = create_merge_model( input_shape, style_mod, content_mod )

    style_target = style_mod.predict(S)
    content_target = content_mod.predict(C)

    ww = mod.layers[1].get_weights()
    print(ww[0].shape)

    # optimise the Dense weights (i.e. the image) to match the style and content targets
    mod.fit( np.ones((1,1)), [style_target, content_target], batch_size=1, epochs=1000, verbose=1 )

    w = mod.layers[1].get_weights()
    out = w[0].reshape((W,W,3))

    # transfer the content image's per-channel mean/std onto the result, then clip to [0,1]
    O = out
    o_mu, o_sd = compute_mean_var( O )
    for x in range(W):
        for y in range(W):
            for c in range(3):
                go = c_mu[c] + c_sd[c]*(O[x,y,c]-o_mu[c])/o_sd[c]
                O[x,y,c] = np.min([1, np.max([0, go])])
    O = O*255.0
    Image.fromarray(O.astype(np.uint8)).save('out.png')
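# Example usage (added): assuming the gist is saved as style_transfer.py (the filename is
# illustrative) and vgg16.weights.h5 is in the working directory:
#   python style_transfer.py --style style_image.jpg --content content_photo.jpg
# The stylised result is written to out.png.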