Implementation of LSTM and Conv1D autoencoders for trajectory sequences (TensorFlow 1.x / Keras layers).
# TensorFlow 1.x graph-mode code: uses tf.variable_scope / tf.AUTO_REUSE
# and tf.keras layers accessed through the `layer` alias.
import tensorflow as tf
from tensorflow.keras import layers as layer

###############################################################################
###############################################################################
###############################################################################
class LSTM_AE(object):
    """
    Class for constructing an AE architecture with sequence inputs,
    using LSTMs. The latent code is sampled with the reparameterisation
    trick (mean + sqrt(var) * eps), as in a variational autoencoder.
    Resource: https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html
    """

    def _traj_encoder(self, traj_input):
        """ Create an encoder from the AE architecture blueprint. """
        with tf.variable_scope('lstm_encoder', reuse=tf.AUTO_REUSE):
            # Optional stacked LSTM / TimeDistributed(Dense) pre-processing
            # layers were experimented with here but are left out.
            with tf.variable_scope('latent_representation', reuse=tf.AUTO_REUSE):
                # With return_sequences and return_state the LSTM returns
                # (full sequence, final hidden state, final cell state);
                # the final hidden state serves as the latent mean.
                _, z_mean, state_c = layer.LSTM(self.latentdim_traj_ae,
                                                return_sequences=True,
                                                return_state=True)(traj_input)
                # A second LSTM head predicts the log-variance of the latent code.
                z_var_log = layer.LSTM(self.latentdim_traj_ae)(traj_input)
                z_var = tf.exp(z_var_log) + 1e-20
                # Reparameterisation trick: z = mean + std * eps
                eps = tf.random_normal(tf.shape(z_var), mean=0., stddev=1.0,
                                       dtype=tf.float32, name='epsilon')
                z = tf.add(z_mean, tf.multiply(tf.sqrt(z_var), eps),
                           name="latent_var")
                states = [z_mean, state_c]
        return z, z_mean, z_var, states

    def _traj_decoder(self, z_traj_input, states=None):
        """ Create a decoder from the AE architecture blueprint. """
        with tf.variable_scope('lstm_decoder', reuse=tf.AUTO_REUSE):
            # Repeat the latent vector along the time axis so the LSTM can
            # unroll it back into a sequence of the original length.
            z_traj_input = layer.RepeatVector(self.inputdim_traj_ae[0])(z_traj_input)
            h_size = 1
            z_traj_input = layer.LSTM(h_size,
                                      return_sequences=True)(z_traj_input)
            # Per-timestep linear readout (note: sized to the latent
            # dimension here, not the trajectory dimension).
            traj_hat = layer.TimeDistributed(
                layer.Dense(self.latentdim_traj_ae,
                            activation='linear'))(z_traj_input)
        return traj_hat
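
# --- Added sketch (not in the original gist) ---------------------------------
# Both encoders return a sampled latent `z` together with (z_mean, z_var),
# i.e. the parameters of a diagonal-Gaussian posterior, so a VAE-style
# objective is the natural fit. The helper below is an illustrative loss
# under that assumption: mean-squared reconstruction error plus the analytic
# KL divergence to a standard normal prior. The function name and the
# `kl_weight` parameter are hypothetical, not part of the original code.
def traj_vae_loss(traj_input, traj_hat, z_mean, z_var, kl_weight=1.0):
    """ Reconstruction (MSE) + KL(q(z|x) || N(0, I)) loss sketch. """
    # Assumes traj_hat has the same (batch, time, dim) shape as traj_input.
    recon = tf.reduce_mean(tf.reduce_sum(tf.square(traj_input - traj_hat),
                                         axis=[1, 2]))
    # Analytic KL for a diagonal Gaussian vs. the standard normal prior.
    kl = -0.5 * tf.reduce_mean(tf.reduce_sum(
        1. + tf.log(z_var) - tf.square(z_mean) - z_var, axis=1))
    return recon + kl_weight * kl
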
###############################################################################
###############################################################################
###############################################################################
class CONV1D_AE(object):
    """
    Class for constructing an AE architecture with sequence inputs,
    using 1D convolutions.
    Resources:
        - https://towardsdatascience.com/autoencoders-for-the-compression-of-stock-market-data-28e8c1a2da3e
        - https://blog.goodaudience.com/introduction-to-1d-convolutional-neural-networks-in-keras-for-time-sequences-3a7ff801a2cf
    """

    def _traj_encoder(self, traj_input):
        """ Create an encoder from the AE architecture blueprint. """
        h_size = 1
        with tf.variable_scope('1Dconv_encoder', reuse=tf.AUTO_REUSE):
            # Two conv + max-pooling stages compress the sequence along time.
            traj_input = layer.Conv1D(h_size, 3, activation='relu', padding='same')(traj_input)
            traj_input = layer.MaxPool1D(4, padding='same')(traj_input)
            traj_input = layer.Conv1D(1, 3, activation='relu', padding='same')(traj_input)
            traj_input = layer.MaxPool1D(4, padding='same')(traj_input)
            traj_input = layer.Flatten()(traj_input)
            # Separate dense heads for the latent mean and log-variance.
            z_mean = layer.Dense(self.latentdim_traj_ae, activation='tanh')(traj_input)
            z_var_log = layer.Dense(self.latentdim_traj_ae, activation='linear')(traj_input)
            z_var = tf.exp(z_var_log) + 1e-20
            # Reparameterisation trick: z = mean + std * eps
            eps = tf.random_normal(tf.shape(z_var), mean=0., stddev=1.0,
                                   dtype=tf.float32, name='epsilon')
            z = tf.add(z_mean, tf.multiply(tf.sqrt(z_var), eps),
                       name="latent_var")
        return z, z_mean, z_var, []

    def _traj_decoder(self, z_traj_input, states=None):
        """ Create a decoder from the AE architecture blueprint. """
        h_size = 1
        with tf.variable_scope('1Dconv_decoder', reuse=tf.AUTO_REUSE):
            # Expand the latent vector and reshape it into a short
            # 2-channel sequence (25 timesteps x 2 channels = 50 units).
            z_traj_input = layer.Dense(50)(z_traj_input)
            z_traj_input = layer.Reshape((25, 2))(z_traj_input)
            z_traj_input = layer.Conv1D(h_size, 3, activation='relu', padding='same')(z_traj_input)
            z_traj_input = layer.UpSampling1D(2)(z_traj_input)
            # Final 1x1 convolution maps back to 2 output channels
            # (50 timesteps in total after upsampling).
            traj_hat = layer.Conv1D(2, 1, activation='relu', padding='same')(z_traj_input)
        return traj_hat
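
# --- Added usage sketch (not in the original gist) ---------------------------
# A minimal, hypothetical example of wiring one of the classes into a
# TensorFlow 1.x graph, reusing the illustrative `traj_vae_loss` defined
# above. Neither class defines __init__, so the consuming code is expected
# to provide `latentdim_traj_ae` and `inputdim_traj_ae`; the `TrajVAE`
# subclass, its attribute values, and the optimiser settings below are
# assumptions for illustration. Timesteps are set to 50 with 2 channels
# because the Conv1D decoder's output is hard-coded to that shape.
if __name__ == '__main__':

    class TrajVAE(CONV1D_AE):
        """ Hypothetical wrapper supplying the configuration attributes. """
        def __init__(self, timesteps=50, traj_dim=2, latent_dim=8):
            self.inputdim_traj_ae = (timesteps, traj_dim)
            self.latentdim_traj_ae = latent_dim

    model = TrajVAE()
    traj_ph = tf.placeholder(tf.float32,
                             shape=(None,) + model.inputdim_traj_ae)
    z, z_mean, z_var, _ = model._traj_encoder(traj_ph)
    traj_hat = model._traj_decoder(z)
    loss = traj_vae_loss(traj_ph, traj_hat, z_mean, z_var)
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)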