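"""Exact Gaussian process regression in GPyTorch on a noisy 1-D sine wave.

Fits a constant-mean GP with a scaled RBF kernel by optimizing the exact
marginal log likelihood, then plots the posterior predictive mean and
confidence region over a dense test grid.
"""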
import gpytorch
import numpy as np
import matplotlib.pyplot as plt
import torch
# Create features
X = torch.tensor(np.arange(100)).float()
# Create targets as noisy function of features
Y = torch.tensor(np.add(np.sin(X / 10), np.random.normal(loc=0, scale=0.5, size=100))).float()
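# Optional sketch (not in the original gist): GP hyperparameter optimization is
# often more stable when targets are standardized to zero mean and unit
# variance; one could do, e.g.:
#   Y = (Y - Y.mean()) / Y.std()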
# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
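# A possible variation (an assumption, not part of the original gist): GPyTorch
# also provides a Matern kernel, which can be swapped in by replacing the
# covariance module above, e.g.:
#   self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(nu=2.5))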
# Initialize likelihood and model
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X, Y, likelihood)
# Number of hyperparameter optimization steps
training_iter = 50
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
# Performs "training" via hyperparameter optimization
for i in range(training_iter):
    # Zero gradients from previous iteration
    optimizer.zero_grad()
    # Output from model
    output = model(X)
    # Calc loss and backprop gradients
    loss = -mll(output, Y)
    loss.backward()
    print('Iter %d/%d - Loss: %.3f   lengthscale: %.3f   noise: %.3f' % (
        i + 1, training_iter, loss.item(),
        model.covar_module.base_kernel.lengthscale.item(),
        model.likelihood.noise.item()
    ))
    optimizer.step()
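# Optional sketch (not in the original gist): the learned hyperparameters live
# in standard PyTorch state dicts, so they could be checkpointed with, e.g.:
#   torch.save(model.state_dict(), 'gp_state.pth')   # 'gp_state.pth' is a hypothetical path
#   model.load_state_dict(torch.load('gp_state.pth'))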
# Place the model and likelihood in "posterior mode" for evaluation
model.eval()
likelihood.eval()
# Create test data
X_test = torch.tensor(np.arange(0, 100, 0.01)).float()
# Latent-function predictions (no observation noise); no_grad avoids building
# an autograd graph we don't need at evaluation time
with torch.no_grad():
    f_preds = model(X_test)
    # Attributes of the predictive distribution
    f_mean = f_preds.mean
    f_var = f_preds.variance
    f_covar = f_preds.covariance_matrix
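# Sketch (not in the original gist): f_preds is a GPyTorch MultivariateNormal,
# so posterior function samples can also be drawn, e.g. 5 draws over X_test:
#   with torch.no_grad():
#       f_samples = f_preds.sample(sample_shape=torch.Size([5]))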
# Run with the no_grad and LOVE (fast variance prediction) context managers
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    # Initialize plot
    f, ax = plt.subplots(1, 1, figsize=(4, 3))
    # Make predictions with the noise model (posterior over observations)
    observed_pred = likelihood(model(X_test))
    # Get upper and lower confidence bounds
    lower, upper = observed_pred.confidence_region()
    # Plot training data as black stars
    ax.scatter(X.numpy(), Y.numpy(), c='k', marker='*')
    # Plot predictive mean as a blue line
    ax.plot(X_test.numpy(), observed_pred.mean.numpy(), 'b')
    # Shade between the lower and upper confidence bounds
    ax.fill_between(X_test.numpy(), lower.numpy(), upper.numpy(), alpha=0.5)
    ax.set_ylim([-3, 3])
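# Display the figure when running as a script (in a notebook rendering is implicit)
plt.show()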