Skip to content

Instantly share code, notes, and snippets.

@deepanshu-yadav
Last active June 7, 2022 10:18
Show Gist options
  • Save deepanshu-yadav/1a4197acac0914951448a0862270d314 to your computer and use it in GitHub Desktop.
import tensorflow as tf
from tensorflow.keras.layers import InputLayer, Dense, Input, Dropout,\
BatchNormalization, Flatten
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import Adam
from functools import partial
def create_ae_model(num_hidden_layers=3, hidden_layer_neurons=64,
                    lr=0.001, kernel_init='lecun_normal', optimizer='adam',
                    noise_at_input=0.05, noise_in_hidden_layer=0.05,
                    activity_regularizer=1e-05,
                    act='relu', dropout=0, input_data_dim=None):
    """Build and compile a symmetric dense autoencoder.

    Architecture: Input -> ``num_hidden_layers`` encoder Dense layers ->
    ``num_hidden_layers - 1`` decoder Dense layers -> sigmoid output of
    size ``input_data_dim``. Every hidden layer has the same width.

    Args:
        num_hidden_layers: depth of the encoder half (any positive int;
            the original hard-coded list capped this at 4).
        hidden_layer_neurons: width of each hidden layer.
        lr: learning rate for the Adam optimizer.
        kernel_init: kernel initializer for the hidden Dense layers.
        optimizer: currently UNUSED — Adam is always compiled in. Kept
            only for signature backward compatibility.
        noise_at_input: currently UNUSED — no noise layer is added.
        noise_in_hidden_layer: currently UNUSED — no noise layer is added.
        activity_regularizer: L1 activity-regularization factor applied
            to every hidden layer.
        act: activation function for the hidden layers.
        dropout: dropout rate inserted after each hidden layer; 0 (the
            default) disables dropout entirely.
        input_data_dim: number of input features. Defaults to the
            module-level NO_OF_FEATURES, resolved at call time so that
            importing this module does not require the constant to exist.

    Returns:
        A compiled tf.keras Sequential model using binary cross-entropy
        loss with 'mae' and 'mse' as additional metrics.
    """
    if input_data_dim is None:
        # Late binding: the original evaluated NO_OF_FEATURES in the
        # `def` line, which raises NameError at import time if the
        # constant is defined later.
        input_data_dim = NO_OF_FEATURES

    # All hidden layers share one width; supports arbitrary depth.
    encoder_dims = [hidden_layer_neurons] * num_hidden_layers

    # Dense layer pre-configured with the shared hyper-parameters.
    RegularizedDense = partial(
        tf.keras.layers.Dense,
        activation=act,
        kernel_initializer=kernel_init,
        activity_regularizer=regularizers.l1(activity_regularizer),
    )

    autoencoder = Sequential()
    # Keras documents `shape` as a tuple of per-sample dimensions
    # (batch axis excluded), so wrap the int in a 1-tuple.
    autoencoder.add(Input(shape=(input_data_dim,)))

    # Encoder phase.
    for dim in encoder_dims:
        autoencoder.add(RegularizedDense(dim))
        if dropout > 0:
            autoencoder.add(Dropout(dropout))

    # Decoder phase: mirror of the encoder, excluding the bottleneck
    # layer itself (the original did reverse() then dropped the first).
    for dim in reversed(encoder_dims[:-1]):
        autoencoder.add(RegularizedDense(dim))
        if dropout > 0:
            autoencoder.add(Dropout(dropout))

    # Sigmoid output reconstructs the input — assumes features are
    # scaled to [0, 1], consistent with the binary cross-entropy loss.
    autoencoder.add(Dense(input_data_dim, activation='sigmoid'))
    autoencoder.summary()

    # Binary cross-entropy reconstruction loss; MAE and MSE reported
    # as additional metrics.
    autoencoder.compile(optimizer=Adam(learning_rate=lr),
                        loss=tf.keras.losses.binary_crossentropy,
                        metrics=['mae', 'mse'])
    return autoencoder
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment