Himanshu Rawlani himanshurawlani

himanshurawlani / tune_reporter_callback.py
Last active September 10, 2020 18:45
An example Keras callback to report metrics to Ray Tune after every epoch
class TuneReporter(tf.keras.callbacks.Callback):
    """Tune Callback for Keras."""

    def __init__(self, reporter=None, freq="epoch", logs=None):
        """Initializer.

        Args:
            freq (str): Sets the frequency of reporting intermediate results.
        """
        self.iteration = 0
        logs = logs or {}
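The preview cuts off before the reporting logic. Below is a minimal sketch of how such a callback could forward the Keras epoch metrics to Ray Tune, assuming the functional tune.report API and that the compiled model logs val_loss and val_accuracy; it is an illustration rather than the gist's exact code.

import tensorflow as tf
from ray import tune

class TuneReporter(tf.keras.callbacks.Callback):
    """Tune Callback for Keras (sketch)."""

    def __init__(self, reporter=None, freq="epoch", logs=None):
        super(TuneReporter, self).__init__()
        self.iteration = 0
        self.freq = freq

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.iteration += 1
        if self.freq == "epoch":
            # Forward this epoch's metrics so the scheduler and search
            # algorithm can act on intermediate results
            tune.report(keras_info=logs,
                        val_loss=logs.get("val_loss"),
                        val_accuracy=logs.get("val_accuracy"))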
himanshurawlani / ray_tune_trainable.py
Last active September 6, 2020 19:14
An example script to initialize a trainable for Ray Tune and start hyperparameter tuning
class Trainable:
    def __init__(self, train_dir, val_dir, snapshot_dir, final_run=False):
        # Initialize state variables for the run
        self.train_dir = train_dir
        self.val_dir = val_dir
        self.final_run = final_run
        self.snapshot_dir = snapshot_dir

    def train(self, config, reporter=None):
        # If you get an out-of-memory error, try reducing the maximum batch size
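The rest of the train method is not shown in the preview. A minimal sketch of how it might continue inside the Trainable class, assuming tensorflow is imported as tf, FCN_model and TuneReporter come from the other gists, make_generators is a hypothetical data-loading helper, and the epoch count is arbitrary:

    def train(self, config, reporter=None):
        # If you get an out-of-memory error, try reducing the maximum batch size
        batch_size = config["batch_size"]
        # Hypothetical helper that builds the training and validation generators
        train_gen, val_gen = make_generators(self.train_dir, self.val_dir, batch_size)

        # Build and compile the hyper model from the sampled hyperparameters
        model = FCN_model(config, len_classes=5)
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=config["lr"]),
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])

        callbacks = []
        if not self.final_run:
            # Report intermediate metrics to Ray Tune after every epoch
            callbacks.append(TuneReporter())

        model.fit(train_gen, validation_data=val_gen, epochs=10, callbacks=callbacks)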
himanshurawlani / initialize_ray_tune.py
Last active September 6, 2020 18:10
An example of creating a trial scheduler and search algorithm using Ray Tune
logger.info("Initializing ray")
ray.init(configure_logging=False)

logger.info("Initializing ray search space")
search_space, initial_best_config = create_search_space()

logger.info("Initializing scheduler and search algorithms")
# Use the HyperBand scheduler to early-stop unpromising runs
scheduler = AsyncHyperBandScheduler(time_attr='training_iteration',
                                    metric="val_loss",
                                    mode="min")
himanshurawlani / hyperopt_search_space.py
Last active September 6, 2020 14:57
An example HyperOpt search space
{
    "lr": hp.choice("lr", [0.0001, 0.001, 0.01, 0.1]),
    "batch_size": hp.choice("batch_size", [8, 16, 32, 64]),
    "use_contrast": hp.choice("use_contrast", ["True", "False"]),
    "contrast_factor": hp.choice("contrast_factor", [0.1, 0.2, 0.3, 0.4]),
    "use_rotation": hp.choice("use_rotation", ["True", "False"]),
    "rotation_factor": hp.choice("rotation_factor", [0.1, 0.2, 0.3, 0.4]),
    "use_flip": hp.choice("use_flip", ["True", "False"]),
    "flip_mode": hp.choice("flip_mode", ["horizontal", "vertical"]),
    "dropout_rate": hp.choice("dropout_rate", [0.1, 0.2, 0.3, 0.4, 0.5]),
himanshurawlani / hyper_model_config.json
Last active September 5, 2020 20:22
An example config for the hyper model
{
    "lr": 0.001,
    "batch_size": 16,
    "use_contrast": "True",
    "contrast_factor": 0.2,
    "use_rotation": "True",
    "rotation_factor": 0.2,
    "use_flip": "True",
    "flip_mode": "horizontal",
    "dropout_rate": 0.2,
himanshurawlani / keras_preprocessing_layers.py
Created September 5, 2020 19:58
An example of Keras preprocessing layers in TF2
import tensorflow as tf

def augment_images(x, config):
    if config['use_contrast'] == "True":
        x = tf.keras.layers.experimental.preprocessing.RandomContrast(
            config['contrast_factor']
        )(x)
    if config['use_rotation'] == "True":
        x = tf.keras.layers.experimental.preprocessing.RandomRotation(
            config['rotation_factor']
        )(x)
    if config['use_flip'] == "True":
        x = tf.keras.layers.experimental.preprocessing.RandomFlip(
            config['flip_mode']
        )(x)
    return x
himanshurawlani / hyper_model.py
Last active September 5, 2020 20:10
An example script to create a hyper model
def FCN_model(config, len_classes=5):
    input = tf.keras.layers.Input(shape=(None, None, 3))

    # Adding data augmentation layers
    x = augment_images(input, config)

    # You can create a fixed number of convolutional blocks, or use a loop
    # if the number of layers is itself a hyperparameter (see the sketch below)
    x = tf.keras.layers.Conv2D(filters=config['conv_block1_filters'], kernel_size=3, strides=1)(x)
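Since the comment above suggests a loop when the number of layers is itself a hyperparameter, here is a minimal sketch of that variant; the num_conv_blocks and conv_block{i}_filters keys are hypothetical extensions of the search space shown earlier:

import tensorflow as tf

def FCN_model(config, len_classes=5):
    inputs = tf.keras.layers.Input(shape=(None, None, 3))
    x = augment_images(inputs, config)

    # Number of convolutional blocks driven by the config (hypothetical key)
    for i in range(1, config.get("num_conv_blocks", 2) + 1):
        x = tf.keras.layers.Conv2D(filters=config[f"conv_block{i}_filters"],
                                   kernel_size=3,
                                   strides=1,
                                   activation="relu")(x)
        x = tf.keras.layers.MaxPooling2D()(x)

    # Fully convolutional head: global pooling, dropout, then class scores
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(config["dropout_rate"])(x)
    outputs = tf.keras.layers.Dense(len_classes, activation="softmax")(x)

    return tf.keras.Model(inputs=inputs, outputs=outputs)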
import json
import os
import requests

def make_serving_request(image_batch):
    data = json.dumps({"signature_name": "serving_default",
                       "instances": image_batch.tolist()})
    headers = {"content-type": "application/json"}
    os.environ['NO_PROXY'] = 'localhost'
    json_response = requests.post(
        'http://localhost:8501/v1/models/flower_classifier:predict', data=data, headers=headers)
    # TensorFlow Serving returns the scores under the "predictions" key
    return json.loads(json_response.text)['predictions']
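A minimal usage sketch for the function above, assuming the flower_classifier model is already being served by TensorFlow Serving on port 8501; the image path and target size are placeholders:

import numpy as np
import tensorflow as tf

# Load one image and turn it into a batch of one
img = tf.keras.preprocessing.image.load_img("daisy.jpg", target_size=(224, 224))
image_batch = np.expand_dims(tf.keras.preprocessing.image.img_to_array(img), axis=0)

predictions = make_serving_request(image_batch)
print("Predicted class index:", np.argmax(predictions[0]))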
def export(input_h5_file, export_path):
    # The export path contains the name and the version of the model
    tf.keras.backend.set_learning_phase(0)  # Ignore dropout at inference
    model = tf.keras.models.load_model(input_h5_file)
    model.save(export_path, save_format='tf')
    print(f"SavedModel created at {export_path}")
def train(model, train_generator, val_generator, epochs=50):
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    checkpoint_path = './snapshots'
    os.makedirs(checkpoint_path, exist_ok=True)
    model_path = os.path.join(checkpoint_path, 'model_epoch_{epoch:02d}_loss_{loss:.2f}_acc_{acc:.2f}_val_loss_{val_loss:.2f}_val_acc_{val_acc:.2f}.h5')

    history = model.fit_generator(generator=train_generator,