"""Resumable hyperparameter search with hyperopt: fmin is called once per
evaluation with max_evals incremented by one, and the Trials object is
pickled after every step so an interrupted search can be resumed."""

import os
import pickle
from math import log

import numpy as np
from hyperopt import fmin, hp, tpe, Trials, STATUS_OK
from tqdm import tqdm_notebook


def load_experiment_backup(model_name):
    """Restore a pickled Trials object, or start a fresh one."""
    filename = 'h_backup/%s.pickle' % model_name
    if os.path.isfile(filename):
        with open(filename, 'rb') as handle:
            return pickle.load(handle)
    return Trials()


def update_experiment_backup(trials, model_name):
    """Persist the Trials object after each evaluation."""
    with open('h_backup/%s.pickle' % model_name, 'wb') as f:
        pickle.dump(trials, f)
def run_hyperopt(max_evals, model_name):
    def hyperopt_objective(args):
        # `specs`, `data`, `run`, and `compute_ppx_postproc` are assumed to
        # be defined by the surrounding experiment code.
        config = {
            'model': {
                'hid_dim': int(args[0]),
                'label_hid_dim': int(args[1]),
                'token_hid_dim': int(args[2]),
                'dropout_keep_rate': args[3],
                'learning_rate': args[4],
            },
            'eval': {
                'step_funcs': ['optimizer'],
                'score_funcs': ['loss', 'loss_tokn_term',
                                'mean_err_bool', 'mean_err_labl',
                                'mean_err_tokn', 'mean_err_tokn_term'],
                'score_postprocs': [
                    ('ppx_tokn_term', compute_ppx_postproc(index=1)),
                ],
                'validation_field_name': 'ppx_tokn_term',
            },
            'exec': {
                'model_name': model_name,
                'n_epoch': 100,
                'early_stop_tolerance': 3,
                'score_mean_window': 10,
                'print_every_iter': False,
                'print_every_epoch': 1,
                'device_id': '/gpu:2',
                'save_model': False,
            },
            'data': {
                'specs': specs,
            },
        }
        print(config)
        ret = run(data, config)
        err = ret['best_val_test_score']
        export_keys = ['best_epoch_i', 'trainable_n', 'best_val_score']
        result = {
            'loss': err,  # the value hyperopt minimizes
            'status': STATUS_OK,
            'config': config['model'],
            'vars': {k: ret[k] for k in export_keys},
        }
        print(result)
        return result
    # ---- search space: integer-quantized log-uniform layer sizes,
    # uniform dropout keep rate, log-uniform learning rate
    space = [
        hp.qloguniform('hid_dim', log(50), log(500), q=1),
        hp.qloguniform('label_hid_dim', log(50), log(300), q=1),
        hp.qloguniform('token_hid_dim', log(50), log(300), q=1),
        hp.uniform('dropout_keep_rate', 0.1, 1.0),
        hp.loguniform('learning_rate', log(0.0001), log(0.005)),
    ]

    best_loss = np.inf
    with tqdm_notebook(range(max_evals), desc='total', total=max_evals) as trial_iter:
        for trial_n in trial_iter:
            # Reload the backup each step so fmin runs exactly one new
            # evaluation (max_evals=trial_n+1) on top of the saved ones.
            trials = load_experiment_backup(model_name)
            fmin_args = {
                'fn': hyperopt_objective,
                'space': space,
                'max_evals': trial_n + 1,
                'trials': trials,
                'algo': tpe.suggest,
            }
            best = fmin(**fmin_args)
            update_experiment_backup(trials, model_name)
            last_loss = list(trials)[trial_n]['result']['loss']
            best_loss = min(last_loss, best_loss)
            print('Done with evaluation attempt', trial_n, last_loss, 'best is', best_loss)
            trial_iter.set_description('%5g ' % best_loss)
    return best, trials, model_name
def test_hyperopt():
    return run_hyperopt(max_evals=100, model_name='branch_top_down_full_ptb')
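
# ---- usage sketch ----
# A minimal, hypothetical example of inspecting the pickled search state;
# it assumes test_hyperopt() above has already written at least one
# evaluation to h_backup/branch_top_down_full_ptb.pickle.
if __name__ == '__main__':
    trials = load_experiment_backup('branch_top_down_full_ptb')
    if len(trials) > 0:
        # Each result dict was built in hyperopt_objective above.
        best_result = min(trials.results, key=lambda r: r['loss'])
        print('best loss so far:', best_result['loss'])
        print('best model config:', best_result['config'])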