import tensorflow as tf
from tensorflow.keras.constraints import max_norm

tfk = tf.keras
tfkl = tf.keras.layers

# Assumed: the gist relies on a global `seed` defined elsewhere; 42 is a placeholder.
seed = 42


def get_res_blocks(definitions, x, l2_factor=0.0001, kernel_constraint_norm=2.0):
    """Stack residual blocks of two Dense layers each, with widths taken from `definitions`.

    Each block is Dense(relu) -> Dense(linear), followed by a skip connection
    (added only when the block width matches the incoming width), batch
    normalization, and dropout. Relies on the globals `he_init` and
    `DROPOUT_RATE`, which are defined below and resolved at call time.
    """
    for size in definitions:
        hiddenx = tfkl.Dense(size,
                             activation='relu',
                             kernel_regularizer=tfk.regularizers.l2(l2_factor),
                             kernel_constraint=max_norm(kernel_constraint_norm),
                             bias_initializer=he_init)(x)
        # The second layer is linear; the nonlinearity lives in the first layer only.
        hiddeny = tfkl.Dense(size,
                             kernel_regularizer=tfk.regularizers.l2(l2_factor),
                             kernel_constraint=max_norm(kernel_constraint_norm),
                             bias_initializer=he_init)(hiddenx)
        if size == x.shape[-1]:
            # Widths match: add the residual (skip) connection.
            hiddeny = tfkl.Add()([x, hiddeny])
        batch_norm = tfkl.BatchNormalization()(hiddeny)
        x = tfkl.Dropout(DROPOUT_RATE, seed=seed)(batch_norm)
    return x
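# Example (illustrative, not in the original gist): applied to a 512-wide input,
# get_res_blocks([512, 256], t) yields a 256-wide tensor; the first block keeps
# its skip connection (512 == 512), the second drops it (256 != 512).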
DROPOUT_RATE = 0.5

# Create an input layer with shape (96, 96, 3)
inputs = tfk.Input(shape=(96, 96, 3))

# Normalization plus augmentation: brightness/contrast jitter, horizontal
# flips, small rotations, and Gaussian noise.
preprocessing_steps = tfk.Sequential(
    [
        tfkl.Normalization(),
        tfkl.RandomBrightness(0.1),
        tfkl.RandomContrast(0.3),
        tfkl.RandomFlip("horizontal"),
        tfkl.RandomRotation(0.2),
        tfkl.GaussianNoise(0.1),
    ]
)
preprocessing_layer = preprocessing_steps(inputs)
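# Note (not in the original gist): Normalization() computes no statistics on
# its own; depending on the Keras version, calling it un-adapted may raise or
# apply identity scaling. Assuming X_train holds the training images, it
# should be adapted before fit(), e.g.:
# preprocessing_steps.layers[0].adapt(X_train)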
# Connect MobileNetV3Large to the input
mobilenet = tfk.applications.MobileNetV3Large(
    input_shape=(96, 96, 3),
    include_top=False,
    weights="imagenet",
    pooling='avg',
)
# Fine-tune the whole backbone: all of its weights stay trainable.
# (Set trainable = False instead to use it as a frozen feature extractor.)
mobilenet.trainable = True
mobilenet_layer = mobilenet(preprocessing_layer)
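# A common middle ground (not in the original gist): freeze most of the
# backbone and fine-tune only its top layers. The cutoff (-30) is an
# illustrative choice, not a value from the gist:
# for layer in mobilenet.layers[:-30]:
#     layer.trainable = False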
he_init = tfk.initializers.HeNormal(seed=seed)
transitional_layer = tfkl.Dense(512,
                                activation='relu',
                                kernel_regularizer=tfk.regularizers.l2(0.0001),
                                bias_initializer=he_init)(mobilenet_layer)
batch_norm = tfkl.BatchNormalization()(transitional_layer)
dropout = tfkl.Dropout(DROPOUT_RATE, seed=seed)(batch_norm)

# The transitional layer is 512-wide, matching definitions[0], so the first
# residual skip connection is used.
res_blocks = get_res_blocks([512, 512, 256, 128],
                            dropout,
                            l2_factor=0.0001,
                            kernel_constraint_norm=2.5)
# Add a Dense layer with 1 unit and sigmoid activation as the binary classifier
outputs = tfkl.Dense(1, activation='sigmoid')(res_blocks)

# Create a Model connecting input and output
mobilenet_model2 = tfk.Model(inputs=inputs, outputs=outputs, name='mobilenet_resblock2')
optimizer = tfk.optimizers.Adam()
# optimizer = tfk.optimizers.SGD(learning_rate=0.01, momentum=0.9)
# optimizer = tfk.optimizers.Nadam()

# Label smoothing softens the 0/1 targets to 0.05/0.95, a mild regularizer.
mobilenet_model2.compile(loss=tfk.losses.BinaryCrossentropy(label_smoothing=0.1),
                         optimizer=optimizer,
                         metrics=['accuracy'])

# Display model summary
mobilenet_model2.summary()
# Inverse-frequency class weights: each class is weighted by the prevalence
# of the *other* class, so the rarer class contributes more to the loss.
zero_weight = y_train[:, 1].sum() / y_train.shape[0]  # weight for the healthy class (0)
one_weight = y_train[:, 0].sum() / y_train.shape[0]   # weight for the unhealthy class (1)
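# Worked example (illustrative numbers, not from the gist): with 700 healthy
# and 300 unhealthy samples, zero_weight = 0.3 and one_weight = 0.7, so each
# unhealthy sample counts about 2.3x as much as a healthy one.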
# Train the model
history = mobilenet_model2.fit(
    x=X_train,
    y=y_train[:, 1],
    class_weight={0: zero_weight, 1: one_weight},
    batch_size=32,
    epochs=100,
    shuffle=True,
    validation_data=(X_test, y_test[:, 1]),
    callbacks=[
        tfk.callbacks.EarlyStopping(monitor='val_accuracy', mode='max', patience=20, restore_best_weights=False),
        tfk.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=5, mode='auto'),
        tfk.callbacks.ModelCheckpoint('best_mobilenet_model', monitor='val_accuracy', mode='max', save_best_only=True, verbose=1),
    ],
).history
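# Follow-up (not in the original gist): reload the checkpointed best model and
# evaluate it on the held-out split. 'best_mobilenet_model' is the path used
# by ModelCheckpoint above:
# best_model = tfk.models.load_model('best_mobilenet_model')
# best_model.evaluate(X_test, y_test[:, 1])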