Skip to content

Instantly share code, notes, and snippets.

View sayakpaul's full-sized avatar
:octocat:
Learn, unlearn and relearn.

Sayak Paul sayakpaul

:octocat:
Learn, unlearn and relearn.
View GitHub Profile
# Calculate batch size
# Scale the per-replica batch size by the number of replicas so each device
# processes `batch_size_per_replica` samples per step under tf.distribute.
# NOTE(review): assumes `strategy` is a tf.distribute.Strategy created
# earlier (e.g. MirroredStrategy/TPUStrategy) — confirm against caller.
batch_size_per_replica = 32
batch_size = batch_size_per_replica * strategy.num_replicas_in_sync
# Reproducibility setup: enable deterministic TF ops and fix all RNG seeds.
# Fixes vs. original: `import tensorflow as tf` appeared twice (redundant),
# and TF was imported *before* TF_DETERMINISTIC_OPS was set — environment
# flags that influence TF behavior should be in place before the first
# TensorFlow import so the runtime picks them up.
import os

os.environ["TF_DETERMINISTIC_OPS"] = "1"

SEED = 666

import tensorflow as tf
import numpy as np

# Seed both TF's and NumPy's global generators so data shuffling,
# weight init, and augmentation are repeatable across runs.
tf.random.set_seed(SEED)
np.random.seed(SEED)
# Weights & Biases sweep configuration.
# NOTE(review): this dict is truncated in the visible source — the closing
# braces for "parameters" and the outer dict are not shown; confirm the
# full definition before reuse.
sweep_config = {
# Search strategy: "random" samples hyperparameter combinations at random
# (alternative shown in the original comment: "grid").
"method": "random", #grid, random
# The metric the sweep optimizes: maximize validation/training accuracy
# as logged to W&B under the name "accuracy".
"metric": {
"name": "accuracy",
"goal": "maximize"
},
# Hyperparameter search space; each key maps to a dict of candidate values.
"parameters": {
"epochs": {
"values": [10, 15, 20]
},
# Set up model checkpoint callback
# Checkpoints are written into the active W&B run directory so they are
# versioned/synced alongside the run. The filename template embeds the
# epoch number and validation accuracy (e.g. "07-0.91.ckpt").
filepath = wandb.run.dir + "/{epoch:02d}-{val_accuracy:.2f}.ckpt"
# save_best_only + mode="max": only overwrite the checkpoint when
# val_accuracy improves; verbose=1 logs a message on each save.
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath,
monitor="val_accuracy",
verbose=1,
save_best_only=True, mode="max")
# Bare dict literal mapping class names to integer labels.
# NOTE(review): presumably the ship-category → label encoding for the
# dataset referenced nearby (train.csv); this expression is unassigned
# here — confirm where it is bound/used in the full file.
{
'Cargo': 1,
'Military': 2,
'Carrier': 3,
'Cruise': 4,
'Tankers': 5
}
├── train
│ ├── images [8932 entries]
│ └── train.csv
└── test_ApKoW4T.csv
# Build a mixed-precision MLP: Dense layers compute in float16 under the
# 'mixed_float16' policy, while the final activation stays float32.
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')

# Fixes vs. original:
#  1. Sequential expects a *list* of layers — the original passed them as
#     separate positional arguments, which raises a TypeError.
#  2. Keras activation identifiers are lowercase: 'Softmax' is not a valid
#     string for tf.keras.activations.get(); use 'softmax'.
model = tf.keras.models.Sequential([
    tf.keras.layers.Input((100,)),
    tf.keras.layers.Dense(10, dtype=policy),
    tf.keras.layers.Dense(10, dtype=policy),
    # Softmax should be done in float32 for numeric stability.
    tf.keras.layers.Activation('softmax', dtype='float32'),
])
from tensorflow.keras import backend as K
# Reset the global Keras state (graph/session) so this model build starts
# from a clean slate, then rebuild the single-hidden-layer variant.
K.clear_session()

policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')

# Fixes vs. original:
#  1. Sequential expects a *list* of layers — the original passed them as
#     separate positional arguments, which raises a TypeError.
#  2. Keras activation identifiers are lowercase: 'Softmax' is not a valid
#     string for tf.keras.activations.get(); use 'softmax'.
model = tf.keras.models.Sequential([
    tf.keras.layers.Input((100,)),
    tf.keras.layers.Dense(10, dtype=policy),
    # Softmax should be done in float32 for numeric stability.
    tf.keras.layers.Activation('softmax', dtype='float32'),
])