Code for optimizing an LGBM model for quantile regression
import numpy as np
from lightgbm import LGBMRegressor
from sklearn.model_selection import RepeatedKFold, cross_val_score
from bayes_opt import BayesianOptimization

# trainx, trainy, and catcols (the list of categorical feature names)
# are assumed to be defined earlier in the session.
covariates = trainx
target = trainy

def lgb_trainer(num_leaves, learning_rate,
                max_depth, n_estimators,
                reg_lambda, reg_alpha,
                subsample):
    # BayesianOptimization proposes floats, so integer-valued
    # hyperparameters are cast back to int before use.
    lgb = LGBMRegressor(objective="quantile",
                        alpha=0.95,  # target the 95th percentile
                        num_leaves=int(num_leaves),
                        subsample=subsample,
                        reg_alpha=reg_alpha,
                        reg_lambda=reg_lambda,
                        n_estimators=int(n_estimators),
                        max_depth=int(max_depth),
                        learning_rate=learning_rate)
    fit_params = {"categorical_feature": catcols}
    rskf = RepeatedKFold(n_splits=5, n_repeats=2)
    scores = cross_val_score(lgb, covariates, target,
                             cv=rskf,
                             fit_params=fit_params,
                             scoring="neg_mean_absolute_error")
    return np.mean(scores)
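
# Side note (an addition, not in the original gist): the model fits the
# 0.95 quantile, but cross-validation scores with plain MAE, which is
# minimized by the median rather than the 95th percentile. A minimal
# sketch of a scorer that matches the objective, assuming
# scikit-learn >= 0.24 (which provides mean_pinball_loss):
from sklearn.metrics import mean_pinball_loss, make_scorer

# greater_is_better=False makes cross_val_score negate the loss,
# mirroring the sign convention of "neg_mean_absolute_error".
pinball_95 = make_scorer(mean_pinball_loss, alpha=0.95,
                         greater_is_better=False)
# To use it, pass scoring=pinball_95 to cross_val_score above.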
def lgb_opt():
    # Search space for Bayesian optimization; the optimizer maximizes
    # the (negated) cross-validated error returned by lgb_trainer.
    optimizer = BayesianOptimization(
        f=lgb_trainer,
        pbounds={"num_leaves": (10, 50),
                 "n_estimators": (200, 1000),
                 "learning_rate": (0.05, 0.40),
                 "reg_alpha": (0.0, 10.0),
                 "reg_lambda": (0.0, 10.0),
                 "subsample": (0.8, 1.0),
                 "max_depth": (8, 15)},
    )
    optimizer.maximize(n_iter=5, init_points=1)
    print("Final result:", optimizer.max)
    return optimizer.max

lgb_results = lgb_opt()
print(lgb_results)
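
# Usage sketch (an addition, not part of the original gist): refit a
# single model on all training data with the best hyperparameters found.
# bayes_opt's optimizer.max is a dict of the form
# {"target": ..., "params": {...}}.
best = lgb_results["params"]
final_model = LGBMRegressor(objective="quantile",
                            alpha=0.95,
                            num_leaves=int(best["num_leaves"]),
                            n_estimators=int(best["n_estimators"]),
                            max_depth=int(best["max_depth"]),
                            learning_rate=best["learning_rate"],
                            reg_alpha=best["reg_alpha"],
                            reg_lambda=best["reg_lambda"],
                            subsample=best["subsample"])
final_model.fit(covariates, target, categorical_feature=catcols)
# final_model.predict(new_x) now approximates the conditional 95th
# percentile of the target; new_x here is a hypothetical test matrix.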