@armhold
Created June 3, 2016 16:27
tuning hyperparameters
# Random search over learning rate, regularization strength, and weight scale.
# Exponents are sampled uniformly so candidate values are spread evenly on a
# log scale; the ranges below were narrowed after earlier, broader searches.
import numpy as np

# GeorgeNet and Solver come from the surrounding CS231n-style assignment code
# (not shown in this gist), as does the `data` dict of train/val splits.

best_model = None
best_val_acc = 0
best_lr = None
num_trials = 10

for trial in range(num_trials):
    # best so far is 2.74799e-4, aka 10 ** -3.5609848520174077
    # lr_exp = np.random.uniform(-5, 2)
    lr_exp = np.random.uniform(-3.65, -3.2)
    lr = 10 ** lr_exp

    reg_exp = np.random.uniform(-5.02, -4.4)  # (-5.6, -5.5)
    reg = 10 ** reg_exp

    weight_scale_exp = np.random.uniform(-1.8, -1.31)
    weight_scale = 10 ** weight_scale_exp  # orig: 5e-2

    model = GeorgeNet(num_convnets=2, num_affine=2,
                      hidden_dim=100,  # default: 500
                      weight_scale=weight_scale,
                      use_batchnorm=True,
                      reg=reg)

    num_epochs = 1  # orig: 5
    solver = Solver(model, data,
                    num_epochs=num_epochs, batch_size=100,
                    update_rule='adam',
                    optim_config={
                        'learning_rate': lr
                    },
                    verbose=False)
    solver.train()

    print "trial: %d, lr_exp: %g, reg_exp: %g, weight_scale_exp: %g, val_acc: %g" % \
        (trial, lr_exp, reg_exp, weight_scale_exp, solver.best_val_acc)

    # keep the best model and hyperparameters seen so far
    if solver.best_val_acc > best_val_acc:
        best_model = model
        best_val_acc = solver.best_val_acc
        best_lr = lr

print "best_val_acc: ", best_val_acc
print "best_lr: ", best_lr