@fehiepsi
Created April 21, 2018 08:13
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import torch\n",
"import torch.autograd as autograd\n",
"import torch.optim as optim\n",
"from torch.distributions import constraints, transform_to\n",
"\n",
"import pyro\n",
"import pyro.contrib.gp as gp\n",
"pyro.set_rng_seed(0)\n",
"smoke_test = ('CI' in os.environ) # for CI testing\n",
"pyro.enable_validation(True) # for debugging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def f(x):\n",
" return (6 * x - 2)**2 * torch.sin(12 * x - 4)"
]
},
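{
"cell_type": "markdown",
"metadata": {},
"source": [
"The objective above is the 1D test function of Forrester et al. (2008), $f(x) = (6x - 2)^2 \\sin(12x - 4)$, whose global minimum on $[0, 1]$ lies near $x \\approx 0.757$. As a quick sanity check, a dense grid scan (purely illustrative) shows roughly where that minimum is; the Bayesian optimization loop below will try to find it with far fewer evaluations of $f$."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative grid scan of f on [0, 1]; not part of the optimization itself\n",
"xs = torch.linspace(0, 1, 1001)\n",
"ys = f(xs)\n",
"i_best = torch.min(ys, dim=0)[1].item()\n",
"print(\"grid argmin:\", xs[i_best].item(), \"grid min:\", ys[i_best].item())"
]
},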
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# init the model with two input points -0.1 and 1.1\n",
"X = torch.tensor([-0.1, 1.1])\n",
"y = f(X)\n",
"gpmodel = gp.models.GPRegression(X, y, gp.kernels.Matern52(input_dim=1),\n",
" noise=torch.tensor(0.01))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def update_posterior(x_new):\n",
" y = f(x_new)\n",
" X = torch.cat([gpmodel.X, x_new])\n",
" y = torch.cat([gpmodel.y, y])\n",
" gpmodel.set_data(X, y)\n",
" gpmodel.optimize()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def lower_confidence_bound(x, kappa=2):\n",
" mu, variance = gpmodel(x, full_cov=False, noiseless=False)\n",
" sigma = variance.sqrt()\n",
" return mu - kappa * sigma"
]
},
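{
"cell_type": "markdown",
"metadata": {},
"source": [
"The acquisition function used here is the lower confidence bound\n",
"\n",
"$$\\alpha(x) = \\mu(x) - \\kappa\\,\\sigma(x),$$\n",
"\n",
"where $\\mu(x)$ and $\\sigma(x)$ are the GP posterior mean and standard deviation at $x$, and $\\kappa$ (here 2) trades off exploitation of points with a small predicted mean against exploration of points with large predictive uncertainty. Since we are minimizing $f$, the next evaluation point is chosen by minimizing this bound over the search interval."
]
},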
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def find_a_candidate(x_init, lower_bound=0, upper_bound=1, printt=False):\n",
" # transform x to an unconstrained domain to set an minimizer for it\n",
" constraint = constraints.interval(lower_bound, upper_bound)\n",
" unconstrained_x_init = transform_to(constraint).inv(x_init)\n",
" unconstrained_x = torch.tensor(unconstrained_x_init, requires_grad=True)\n",
" minimizer = optim.LBFGS([unconstrained_x])\n",
"\n",
" def closure():\n",
" minimizer.zero_grad()\n",
" x = transform_to(constraint)(unconstrained_x)\n",
" y = lower_confidence_bound(x)\n",
" x_grad = autograd.grad(y, unconstrained_x)\n",
" if printt:\n",
" print(\"=\"*20)\n",
" print(\"unconstrained_x\", unconstrained_x, \"x\", x)\n",
" print(\"loss\", y, \"x_grad\", x_grad[0])\n",
" autograd.backward(unconstrained_x, x_grad)\n",
" return y\n",
"\n",
" minimizer.step(closure)\n",
" # after a candidate found in unconstrained domain, convert it back to original domain\n",
" x = transform_to(constraint)(unconstrained_x)\n",
" return x.detach()"
]
},
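{
"cell_type": "markdown",
"metadata": {},
"source": [
"`find_a_candidate` runs LBFGS in an unconstrained space and relies on `transform_to(constraints.interval(lower_bound, upper_bound))` to map every real number into the search interval. A small stand-alone illustration of that round trip (not used by the optimization itself):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative only: the interval transform squashes the real line into (0, 1)\n",
"# and its inverse undoes the mapping (up to floating-point error)\n",
"t = transform_to(constraints.interval(0., 1.))\n",
"u = torch.tensor([-3., 0., 3.])\n",
"print(t(u))         # constrained values in (0, 1)\n",
"print(t.inv(t(u)))  # approximately recovers u"
]
},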
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def next_x(lower_bound=0, upper_bound=1, num_candidates=3, printt=False):\n",
" candidates = []\n",
" values = []\n",
"\n",
" # last data point will be an init point for first minimum candidate,\n",
" # other minimum candidates will get uniform random initialization\n",
" x_init = gpmodel.X[-1:]\n",
" for i in range(num_candidates):\n",
" if i != 0:\n",
" printt = False \n",
" x = find_a_candidate(x_init, lower_bound, upper_bound, printt)\n",
" y = lower_confidence_bound(x)\n",
" candidates.append(x)\n",
" values.append(y)\n",
" x_init = x.new_empty(1).uniform_(lower_bound, upper_bound)\n",
"\n",
" argmin = torch.min(torch.cat(values), dim=0)[1].item()\n",
" return candidates[argmin]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"====================\n",
"unconstrained_x tensor([ 1.1446]) x tensor([ 0.7585])\n",
"loss tensor([-6.0632]) x_grad tensor(1.00000e-02 *\n",
" [ 7.2222])\n",
"====================\n",
"unconstrained_x tensor([ 1.0724]) x tensor([ 0.7451])\n",
"loss tensor([-6.0252]) x_grad tensor([-1.2300])\n",
"====================\n",
"unconstrained_x tensor([ 1.1406]) x tensor([ 0.7578])\n",
"loss tensor([-6.0647]) x_grad tensor(1.00000e-02 *\n",
" [ 3.5676])\n",
"====================\n",
"unconstrained_x tensor([ 1.1387]) x tensor([ 0.7574])\n",
"loss tensor([-6.0623]) x_grad tensor(1.00000e-03 *\n",
" [ 8.1626])\n",
"====================\n",
"unconstrained_x tensor([ 1.1381]) x tensor([ 0.7573])\n",
"loss tensor([-6.0635]) x_grad tensor(1.00000e-02 *\n",
" [ 1.1227])\n",
"====================\n",
"unconstrained_x tensor([ 1.1373]) x tensor([ 0.7572])\n",
"loss tensor([-6.0616]) x_grad tensor(1.00000e-02 *\n",
" [-4.6968])\n",
"====================\n",
"unconstrained_x tensor([ 1.1380]) x tensor([ 0.7573])\n",
"loss tensor([-6.0623]) x_grad tensor(1.00000e-02 *\n",
" [-5.9201])\n",
"====================\n",
"unconstrained_x tensor([ 1.1388]) x tensor([ 0.7575])\n",
"loss tensor([-6.0629]) x_grad tensor(1.00000e-02 *\n",
" [-2.1426])\n",
"====================\n",
"unconstrained_x tensor([ 1.1392]) x tensor([ 0.7575])\n",
"loss tensor([-6.0609]) x_grad tensor(1.00000e-02 *\n",
" [-5.6103])\n",
"====================\n",
"unconstrained_x tensor([ 1.1404]) x tensor([ 0.7578])\n",
"loss tensor([-6.0625]) x_grad tensor(1.00000e-03 *\n",
" [ 8.1554])\n",
"====================\n",
"unconstrained_x tensor([ 1.1403]) x tensor([ 0.7577])\n",
"loss tensor([-6.0617]) x_grad tensor(1.00000e-02 *\n",
" [ 4.0780])\n",
"====================\n",
"unconstrained_x tensor([ 1.1395]) x tensor([ 0.7576])\n",
"loss tensor([-6.0646]) x_grad tensor(1.00000e-02 *\n",
" [-1.5299])\n",
"====================\n",
"unconstrained_x tensor([ 1.1397]) x tensor([ 0.7576])\n",
"loss tensor([-6.0623]) x_grad tensor(1.00000e-02 *\n",
" [-1.5297])\n",
"====================\n",
"unconstrained_x tensor([ 3.0815]) x tensor([ 0.9561])\n",
"loss tensor([-2.5705]) x_grad tensor([ 1.1616])\n",
"====================\n",
"unconstrained_x tensor([ 1.1650]) x tensor([ 0.7622])\n",
"loss tensor([-6.0533]) x_grad tensor([ 0.6703])\n",
"====================\n",
"unconstrained_x tensor([-1.4504]) x tensor([ 0.1899])\n",
"loss tensor([-2.3223]) x_grad tensor([ 5.6083])\n",
"====================\n",
"unconstrained_x tensor([-23.3312]) x tensor(1.00000e-11 *\n",
" [ 7.3685])\n",
"loss tensor([-0.3553]) x_grad tensor(1.00000e-09 *\n",
" [-3.9455])\n"
]
}
],
"source": [
"x0 = X.new_empty(1).uniform_(0, 1)\n",
"xmin = x0\n",
"for i in range(10 if not smoke_test else 1):\n",
" update_posterior(xmin)\n",
" xmin = next_x(printt=i==9)"
]
}
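,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick check after the loop finishes, the best point evaluated so far can be read off from the data stored in the GP model:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# the GP model stores every evaluated (x, f(x)) pair; report the best one so far\n",
"best_idx = torch.min(gpmodel.y, dim=0)[1].item()\n",
"print(\"best x:\", gpmodel.X[best_idx].item(), \"best f(x):\", gpmodel.y[best_idx].item())"
]
}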
],
"metadata": {
"kernelspec": {
"display_name": "Python (pyro)",
"language": "python",
"name": "pyro"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}