Skip to content

Instantly share code, notes, and snippets.

@nkthiebaut
Created December 30, 2019 19:35
Show Gist options
  • Save nkthiebaut/d6f90a8561f5268429600b53ed4495cc to your computer and use it in GitHub Desktop.
Hyperopt usage example with sklearn
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.model_selection import cross_val_score
def hyperopt_train_test(params):
    """Build the classifier described by *params* and return its mean CV ROC-AUC.

    Parameters
    ----------
    params : dict
        A sample drawn from the hyperopt search space. Must contain a
        'type' key ('gb' or 'rf'); the remaining entries are passed as
        keyword arguments to the corresponding sklearn classifier.

    Returns
    -------
    float
        Mean 5-fold cross-validated ROC-AUC over the combined
        train + validation data, or 0 for an unknown classifier type.
    """
    # Work on a copy so the caller's dict is never mutated.
    params = dict(params)
    model_type = params.pop('type')

    # hp.quniform / hp.qlognormal sample *floats*, but sklearn requires
    # integers for these parameters — cast before constructing the model.
    for int_key in ('n_estimators', 'max_depth'):
        if params.get(int_key) is not None:
            params[int_key] = int(params[int_key])

    if model_type == 'gb':
        clf = GradientBoostingClassifier(**params)
    elif model_type == 'rf':
        clf = RandomForestClassifier(**params)
    else:
        return 0

    # DataFrame/Series.append was removed in pandas 2.0; pd.concat is the
    # supported way to stack train and validation folds.
    # NOTE(review): x_train/x_val/y_train/y_val are defined elsewhere in
    # the surrounding notebook/script.
    x = pd.concat([x_train, x_val])
    y = pd.concat([y_train, y_val])
    return cross_val_score(clf, x, y, scoring="roc_auc", cv=5).mean()
# Search space: hyperopt picks one of the two classifier configurations,
# then samples that configuration's hyperparameters.
# NOTE: hp.quniform / hp.qlognormal sample *floats*; cast them to int
# before handing them to sklearn.
space = hp.choice('classifier_type', [
    {
        'type': 'gb',
        'n_estimators': hp.quniform('gb_n_estimators', 10, 100, 10),
        'learning_rate': hp.lognormal('gb_learning_rate', 0.05, 0.3),
        # GradientBoostingClassifier's criterion is a *regression* split
        # criterion; 'gini'/'entropy' are RandomForest criteria and would
        # raise ValueError here.
        'criterion': hp.choice('gb_criterion',
                               ['friedman_mse', 'squared_error']),
        'max_depth': hp.choice('gb_max_depth',
                               [None, hp.qlognormal('gb_max_depth_int', 3, 1, 1)]),
        'min_samples_split': hp.uniform('gb_min_samples_split', 0.01, 0.05),
    },
    {
        'type': 'rf',
        'criterion': hp.choice('rf_criterion', ['gini', 'entropy']),
        'max_depth': hp.choice('rf_max_depth',
                               [None, hp.qlognormal('rf_max_depth_int', 3, 1, 1)]),
        'min_samples_split': hp.uniform('rf_min_samples_split', 0.01, 0.05),
    },
])
# Best ROC-AUC seen so far, updated by the objective as fmin explores.
best = 0


def f(params):
    """Hyperopt objective: negate the CV ROC-AUC so fmin minimizes it.

    Also tracks and prints the running best score as a side effect.
    """
    global best
    current = hyperopt_train_test(params.copy())
    if current > best:
        best = current
        print('new best:', best, params)
    return {'loss': -current, 'status': STATUS_OK}
# Run the TPE search. Use a distinct name for fmin's result: the original
# rebound the global `best` (the score tracker used inside f) to the
# returned parameter-index dict, silently changing its meaning.
trials = Trials()
best_params = fmin(f, space, algo=tpe.suggest, max_evals=3, trials=trials)
print('best:')
print(best_params)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment