@jschueller · Last active October 2, 2023
bench OT kriging optim Cobyla
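The script below fits a kriging (Gaussian process) metamodel of the Branin function with OpenTURNS and benchmarks several solvers for the covariance hyperparameter optimization (TNC, the native Cobyla, NLopt's COBYLA, and NLopt's Nelder-Mead), reporting the RMSE of the mean prediction on an independent validation design.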
#!/usr/bin/env python
import openturns as ot
from sklearn.metrics import mean_squared_error
import numpy as np
print(ot.__file__)
def calculate_metrics(y_test, y_mean_prediction, y_var_prediction):
    # RMSE
    rmse = np.sqrt(mean_squared_error(y_test, y_mean_prediction))
    return rmse
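# Note: y_var_prediction is accepted but unused above. A hypothetical
# variance-aware check (not in the original gist) could inspect the
# standardized residuals, which should be roughly standard normal when
# the kriging variance is well calibrated:
#   std_res = (y_test - np.ravel(y_mean_prediction)) / np.sqrt(np.ravel(y_var_prediction))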
# definition of the problem: the Branin function on [-5, 10] x [0, 15]
class Branin(object):
    def __init__(self, Name='Branin', Input_dim=2, Bounds=[[-5., 0.], [10., 15.]]):
        self.Name = Name
        self.Input_dim = Input_dim
        self.Bounds = Bounds

    def eval(self, x):
        x0 = x[0]
        x1 = x[1]
        a = 1
        b = 5.1 / (4. * np.pi ** 2)
        c = 5. / np.pi
        r = 6.
        s = 10.
        t = 1. / (8. * np.pi)
        y = a * (x1 - b * x0 ** 2 + c * x0 - r) ** 2 + s * (1. - t) * np.cos(x0) + s
        return y
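# The Branin function is a classic 2-D benchmark for surrogate modeling:
# it has three global minima of value ~0.397887, at (-pi, 12.275),
# (pi, 2.275) and (3*pi, 2.475).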
# Definition of DoEs (training set and validation set)
Nb_samples_per_dim = 20
rmse_OT_default = np.zeros(Nb_samples_per_dim)
pb = Branin()
pb_dim = pb.Input_dim
pb_bounds = pb.Bounds
Nb_training_samples = Nb_samples_per_dim*pb_dim
lhs_exp = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * pb_dim), Nb_training_samples, True, True)
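# The two boolean flags of LHSExperiment are alwaysShuffle and randomShift:
# with alwaysShuffle=True, every generate() call returns a fresh randomized
# LHS design over the unit hypercube.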
# rescale the unit-cube LHS design to the problem bounds
xtrain_denorm = np.array(pb_bounds[0]) + (np.array(pb_bounds[1]) - np.array(pb_bounds[0])) * lhs_exp.generate()
ytrain_denorm = np.zeros((len(xtrain_denorm), 1))
for k in range(len(ytrain_denorm)):
    ytrain_denorm[k] = pb.eval(xtrain_denorm[k, :])
X_mean = np.mean(xtrain_denorm, 0)
X_std = np.std(xtrain_denorm, 0)
xtrain = (xtrain_denorm - X_mean) / X_std
Y_mean = np.mean(ytrain_denorm, 0)
Y_std = np.std(ytrain_denorm, 0)
ytrain = (ytrain_denorm - Y_mean) / Y_std
xtrain_samples = ot.Sample(xtrain)
ytrain_samples = ot.Sample(ytrain)
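# Standardizing inputs and outputs to zero mean / unit variance puts the
# correlation lengths on an O(1) scale, consistent with the [0.001, 1000]
# optimization bounds set on the kriging hyperparameters below.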
Nb_validation_pts = 1000 * pb_dim
# independent LHS design of Nb_validation_pts points for validation
valid_exp = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * pb_dim), Nb_validation_pts, True, True)
xvalid_denorm = np.array(pb_bounds[0]) + (np.array(pb_bounds[1]) - np.array(pb_bounds[0])) * valid_exp.generate()
yvalid_denorm = np.zeros((len(xvalid_denorm), 1))
for k in range(len(yvalid_denorm)):
    yvalid_denorm[k] = pb.eval(xvalid_denorm[k, :])
# normalize the validation data with the training statistics
xvalid = (xvalid_denorm - X_mean) / X_std
yvalid = (yvalid_denorm - Y_mean) / Y_std
xvalid_OT = ot.Sample(xvalid)
yvalid = np.reshape(yvalid, (len(yvalid),))
# set up the trend basis and the covariance kernel
basis = ot.ConstantBasisFactory(pb_dim).build()
covarianceModel = ot.SquaredExponential([0.1] * pb_dim, [1.0])
# covarianceModel = ot.MaternModel([0.1] * pb_dim, [1.0], 1.5)
# covarianceModel = ot.ExponentialModel([0.1] * pb_dim, [1.0])
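# The kernel choice controls the smoothness of the metamodel: SquaredExponential
# yields infinitely differentiable sample paths, Matern with nu=1.5 is once
# differentiable, and ExponentialModel is continuous but nowhere differentiable.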
# Basic kriging: compare likelihood optimization solvers
cobyla1 = ot.Cobyla()
cobyla1.setMaximumEvaluationNumber(10000)
cobyla2 = ot.NLopt('LN_COBYLA')
nelder = ot.NLopt('LN_NELDERMEAD')
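# ot.Cobyla is the COBYLA implementation shipped with OpenTURNS, while
# ot.NLopt('LN_COBYLA') wraps NLopt's reimplementation of the same algorithm;
# TNC (truncated Newton) and Nelder-Mead complete the comparison. Each loop
# iteration rebuilds the algorithm, so all solvers start from the same
# initial covariance parameters.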
for solver in [ot.TNC(), cobyla1, cobyla2, nelder]:
    algo = ot.KrigingAlgorithm(xtrain_samples, ytrain_samples, covarianceModel, basis)
    algo.setOptimizationBounds(ot.Interval([0.001] * pb_dim, [1000] * pb_dim))
    algo.setOptimizationAlgorithm(solver)
    algo.run()
    # Evaluation of results on the validation design
    result = algo.getResult()
    y_mean_prediction = np.array(result.getConditionalMean(xvalid_OT))
    y_var_prediction = np.array(result.getConditionalMarginalVariance(xvalid_OT))
    rmse = calculate_metrics(yvalid, y_mean_prediction, y_var_prediction)
    cName = solver.__class__.__name__
    if hasattr(solver, "getAlgorithmName"):
        cName += "." + solver.getAlgorithmName()
    print(f"solver={cName} rmse={rmse}")