Created
October 22, 2020 04:40
-
-
Save 3catz/0f6ab5bb414b4ba3982d1b6057628975 to your computer and use it in GitHub Desktop.
Code for optimizing LGBM model for quantile regression
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Aliases for the training matrix/target consumed by the CV objective below.
covariates = trainx
target = trainy
def lgb_trainer(num_leaves, learning_rate,
                max_depth, n_estimators,
                reg_lambda,
                reg_alpha,
                subsample):
    """Cross-validated objective for Bayesian optimization of an LGBM quantile model.

    All arguments arrive as floats sampled by BayesianOptimization; the
    integer-valued hyperparameters are truncated with int() before use.

    Returns:
        float: mean negative MAE over 5-fold CV repeated twice
        (negated so that "higher is better", as the maximizer requires).
    """
    lgb = LGBMRegressor(objective="quantile",
                        alpha=.95,  # fit the 95th-percentile quantile
                        # BUGFIX: num_leaves was sampled by the optimizer but
                        # never passed to the model, so tuning it was a no-op.
                        num_leaves=int(num_leaves),
                        subsample=subsample,
                        reg_alpha=reg_alpha,
                        reg_lambda=reg_lambda,
                        n_estimators=int(n_estimators),
                        max_depth=int(max_depth),
                        learning_rate=learning_rate)
    # catcols is defined elsewhere in the notebook/script; forwarded to
    # LGBMRegressor.fit via cross_val_score's fit_params.
    fit_params = {"categorical_feature": catcols}
    rskf = RepeatedKFold(n_splits=5, n_repeats=2)
    scores = cross_val_score(lgb, covariates, target,
                             cv=rskf,
                             fit_params=fit_params,
                             scoring="neg_mean_absolute_error")
    return np.mean(scores)
def lgb_opt():
    """Maximize lgb_trainer's CV score with Bayesian optimization.

    Returns:
        dict: the best observed point, i.e. optimizer.max
        ({'target': best_score, 'params': {...}}).
    """
    # Search bounds for each hyperparameter sampled by the optimizer.
    search_space = {
        "num_leaves": (10, 50),
        "n_estimators": (200, 1000),
        "learning_rate": (0.05, 0.40),
        "reg_alpha": (0, 10.),
        "reg_lambda": (0, 10.),
        "subsample": (0.8, 1.0),
        "max_depth": (8, 15),
    }
    bo = BayesianOptimization(f=lgb_trainer, pbounds=search_space)
    bo.maximize(n_iter=5, init_points=1)
    print("Final result:", bo.max)
    return bo.max
# Run the hyperparameter search and report the best point found.
lgb_results = lgb_opt()
print(lgb_results)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment