Last active
June 11, 2019 10:19
-
-
Save MortisHuang/2d69365da03bd3d1cf2b873b6c7d767f to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os | |
import keras.backend as K | |
from keras.engine.topology import Layer, InputSpec | |
from keras.layers import Dense, Input | |
from keras.models import Model | |
from keras.optimizers import SGD | |
from keras import callbacks | |
from keras.initializers import VarianceScaling | |
def autoencoder(dims, act='relu', init='glorot_uniform'):
    """Build a symmetric, fully connected auto-encoder.

    Arguments:
        dims: units per encoder layer; dims[0] is the input dimension and
            dims[-1] the bottleneck size. The decoder mirrors the encoder,
            so the full auto-encoder has 2*len(dims)-1 layers.
        act: activation for the internal layers (the input, bottleneck and
            output layers stay linear).
        init: kernel initializer passed to every Dense layer.

    Returns:
        (ae_model, encoder_model): the full auto-encoder and the encoder half,
        sharing the same input tensor and weights.
    """
    depth = len(dims) - 1
    net_input = Input(shape=(dims[0],), name='input')

    # Encoder: activated hidden layers, then a linear bottleneck where
    # the features are extracted.
    h = net_input
    for layer_idx in range(depth - 1):
        h = Dense(dims[layer_idx + 1], activation=act, kernel_initializer=init,
                  name='encoder_%d' % layer_idx)(h)
    latent = Dense(dims[-1], kernel_initializer=init,
                   name='encoder_%d' % (depth - 1))(h)

    # Decoder: mirror of the encoder; the reconstruction layer is linear.
    h = latent
    for layer_idx in range(depth - 1, 0, -1):
        h = Dense(dims[layer_idx], activation=act, kernel_initializer=init,
                  name='decoder_%d' % layer_idx)(h)
    reconstruction = Dense(dims[0], kernel_initializer=init, name='decoder_0')(h)

    ae_model = Model(inputs=net_input, outputs=reconstruction, name='AE')
    encoder_model = Model(inputs=net_input, outputs=latent, name='encoder')
    return ae_model, encoder_model
def createFolder(directory):
    """Create *directory* (including parents) if it does not already exist.

    Preserves the original best-effort contract: an OSError is reported on
    stdout instead of being raised, so callers never see an exception.

    Arguments:
        directory: path of the directory to create.
    """
    try:
        # exist_ok=True avoids the check-then-create race that the original
        # os.path.exists() guard had (another process could create the
        # directory between the check and makedirs).
        os.makedirs(directory, exist_ok=True)
    except OSError:
        print('Error: Creating directory. ' + directory)
# ---- Auto-encoder pre-training (DEC-style) --------------------------------
# NOTE(review): `x` (the data matrix) is never defined in this file — it is
# presumably loaded in an earlier notebook cell; confirm before running.
dims = [x.shape[-1], 500, 500, 2000, 10]
init = VarianceScaling(scale=1. / 3., mode='fan_in', distribution='uniform')
pretrain_optimizer = SGD(lr=1, momentum=0.9)  # lr=1 matches the DEC pre-training recipe
pretrain_epochs = 300
batch_size = 256
save_dir = './results'
createFolder(save_dir)

# Bind the built models to fresh names: the original assignment
# `autoencoder, encoder = autoencoder(...)` shadowed the builder function
# with a Model instance, making it impossible to call again.
ae_model, encoder_model = autoencoder(dims, init=init)

from keras.utils import plot_model
plot_model(ae_model, to_file=save_dir + '/autoencoder.png', show_shapes=True)
plot_model(encoder_model, to_file=save_dir + '/encoder.png', show_shapes=True)

# Display the architecture diagrams inline (notebook context).
from IPython.display import Image
Image(filename=save_dir + '/autoencoder.png')
Image(filename=save_dir + '/encoder.png')
#%%
# Pre-train by minimizing reconstruction error, then save the weights for
# the later clustering stage.
ae_model.compile(optimizer=pretrain_optimizer, loss='mse')
ae_model.fit(x, x, batch_size=batch_size, epochs=pretrain_epochs)  # , callbacks=cb)
ae_model.save_weights(save_dir + '/ae_weights.h5')
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment