@obeshor · Created February 19, 2021
# Builds a transfer-learning classification head whose hyperparameters are tuned
# with Keras Tuner. Assumes `pre_trained_model` (the frozen base model) and
# `last_output` (the output tensor of its last used layer) are defined earlier.
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Model


def model_builder_tl(hp):
    # Flatten the output of the pre-trained base to 1 dimension
    x = layers.Flatten()(last_output)

    # Add a fully connected layer
    # Tune the number of units in the Dense layer: choose an optimal value between 32 and 512
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    # Tune the activation function of the Dense layer: relu, tanh, or sigmoid
    hp_activation_dense = hp.Choice("dense_activation", values=["relu", "tanh", "sigmoid"], default="relu")
    x = layers.Dense(units=hp_units, activation=hp_activation_dense)(x)

    # Add a Dropout layer
    # Tune the dropout rate: choose an optimal value between 0.1 and 0.5
    hp_dropout = hp.Float("dropout_1", min_value=0.1, max_value=0.5, default=0.25, step=0.05)
    x = layers.Dropout(rate=hp_dropout)(x)

    # Add a final softmax layer for 3-class classification
    x = layers.Dense(3, activation='softmax')(x)
    model = Model(pre_trained_model.input, x)

    # Tune the learning rate for the Adam optimizer: 0.01, 0.001, or 0.0001
    hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
    # The softmax layer already outputs probabilities, so the loss must be
    # configured with from_logits=False
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
                  loss=keras.losses.CategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])
    return model
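
A minimal sketch of how this builder could be handed to Keras Tuner. The Hyperband strategy, the directory and project names, and the `train_generator`/`validation_generator` datasets are assumptions not shown in the gist; substitute whatever search strategy and data pipeline the surrounding project uses.

import keras_tuner as kt

# Assumed setup: any Keras Tuner strategy works; Hyperband is used here for illustration.
tuner = kt.Hyperband(
    model_builder_tl,
    objective='val_accuracy',
    max_epochs=10,
    directory='kt_dir',           # assumed output directory
    project_name='tl_tuning'      # assumed project name
)

# `train_generator` and `validation_generator` are assumed to yield
# (image batch, one-hot label batch) pairs for the 3-class problem.
tuner.search(train_generator, validation_data=validation_generator, epochs=10)

# Retrieve the best hyperparameters found and rebuild the tuned model
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
best_model = tuner.hypermodel.build(best_hps)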