import numpy as np
from scipy import stats
from scipy.optimize import fmin_l_bfgs_b
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

def _acquisition_func(x0, gp, prev_best, func, xi=0.01, kappa=1.96):
    """Evaluate the acquisition function at the points x0."""
    x0 = np.asarray(x0)
    if x0.ndim == 1:
        x0 = np.expand_dims(x0, axis=0)
    predictions, std = gp.predict(x0, return_std=True)

    if func == 'UCB':
        # Confidence-bound criterion: favour a low predicted mean and a
        # high predictive uncertainty.
        acquisition_func = predictions - kappa * std
    elif func == 'EI':
        # Expected improvement over the best value seen so far.
        # When std is 0.0, Z is huge; in that limit pdf(Z) is 0.0 and
        # cdf(Z) is 1.0, so the raw improvement term is used directly.
        std_mask = std != 0.0
        acquisition_func = prev_best - predictions - xi
        Z = acquisition_func[std_mask] / std[std_mask]
        acquisition_func[std_mask] = std[std_mask] * (
            Z * stats.norm.cdf(Z) + stats.norm.pdf(Z))
        # Negate so that minimizing the acquisition value maximizes the
        # expected improvement (both search strategies below minimize it).
        acquisition_func = -acquisition_func
    else:
        raise ValueError(
            'acquisition_function not implemented yet : ' + func)

    # fmin_l_bfgs_b expects a scalar objective value.
    if acquisition_func.shape == (1,):
        return acquisition_func[0]
    return acquisition_func
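
# A minimal, self-contained sketch (not part of the original gist) of how
# _acquisition_func can be called on its own: fit a GP on a few samples of a
# toy 1-D objective and score candidate points with 'EI'. The toy objective,
# candidate grid and function name below are illustrative assumptions.
def _demo_acquisition():
    rng = np.random.RandomState(0)
    x_train = rng.rand(6, 1)
    y_train = np.sin(3.0 * x_train).ravel()
    gp = GaussianProcessRegressor(kernel=Matern(length_scale=1.0, nu=2.5),
                                  normalize_y=True, random_state=0)
    gp.fit(x_train, y_train)
    candidates = np.linspace(0.0, 1.0, 101).reshape(-1, 1)
    acq_values = _acquisition_func(candidates, gp, np.min(y_train), 'EI')
    # Smaller (more negative) values mean higher expected improvement, so
    # the next point to evaluate is the argmin over the candidates.
    return candidates[np.argmin(acq_values)]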

def bayes_minimize(func, bounds=None, search="sampling",
                   random_state=None, maxiter=1000, acq="EI",
                   num_points=500, return_models=False):
    """Minimize func over a box given as a sequence of (lower, upper) pairs."""
    rng = np.random.RandomState(random_state)
    num_params = len(bounds)
    lower_bounds, upper_bounds = zip(*bounds)
    upper_bounds = np.asarray(upper_bounds)
    lower_bounds = np.asarray(lower_bounds)

    # The search is carried out on the unit cube; points are rescaled to the
    # original bounds before every call to func.
    x0 = rng.rand(num_params)
    func_val = [func(lower_bounds + (upper_bounds - lower_bounds) * x0)]

    length_scale = np.ones(num_params)
    gp_params = {
        'kernel': Matern(length_scale=length_scale, nu=2.5),
        'normalize_y': True,
        'random_state': random_state
    }

    gp_models = []
    x = np.reshape(x0, (1, -1))
    for i in range(maxiter):
        gpr = GaussianProcessRegressor(**gp_params)
        gpr.fit(x, func_val)

        if search == "sampling":
            # Score random candidates and take the one with the smallest
            # acquisition value.
            sampling = rng.rand(num_points, num_params)
            acquis = _acquisition_func(sampling, gpr, np.min(func_val), acq)
            best_arg = np.argmin(acquis)
            best_x = sampling[best_arg]
        elif search == "lbfgs":
            # Optimize the acquisition on the unit cube, which is the space
            # the GP was fit on.
            init = rng.rand(num_params)
            best_x, _, _ = fmin_l_bfgs_b(
                _acquisition_func,
                np.asfortranarray(init),
                args=(gpr, np.min(func_val), acq),
                bounds=[(0.0, 1.0)] * num_params,
                approx_grad=True, maxiter=10)

        if return_models:
            gp_models.append(gpr)

        # Evaluate the objective at the new point and grow the training set.
        best_f = func(lower_bounds + (upper_bounds - lower_bounds) * best_x)
        x = np.vstack((x, best_x))
        func_val.append(best_f)
    func_ind = np.argmin(func_val)
    x_val = x[func_ind]
    best_func_val = func_val[func_ind]
    # Rescale the best unit-cube point back to the original bounds.
    opt_x = lower_bounds + (upper_bounds - lower_bounds) * x_val

    d = {}
    if return_models:
        d["return_models"] = gp_models
    return opt_x, best_func_val, d
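
# A minimal usage sketch (not part of the original gist): minimize a simple
# quadratic with both search strategies. The objective, bounds and iteration
# count below are illustrative; maxiter is kept small so the example is quick.
if __name__ == '__main__':
    def quadratic(params):
        return np.sum((np.asarray(params) - 0.3) ** 2)

    for strategy in ("sampling", "lbfgs"):
        best_x, best_f, _ = bayes_minimize(
            quadratic, bounds=[(-2.0, 2.0), (-2.0, 2.0)],
            search=strategy, random_state=0, maxiter=15,
            num_points=200)
        print(strategy, best_x, best_f)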