Skip to content

Instantly share code, notes, and snippets.

@iancze
Created March 28, 2024 23:56
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save iancze/d557cf5d9159e4d9a67f1721b19895aa to your computer and use it in GitHub Desktop.
# Demo: fit a 1-D Gaussian process regression to a radial brightness profile
# using scikit-learn, and plot the prior and posterior predictions.
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
# Light seaborn figure style (matplotlib >= 3.6 style-name convention).
plt.style.use("seaborn-v0_8-white")
# Training data: sampled brightness-profile values I_nu at 20 radii.
y_train = np.array(
    [5.0, 4.0, 3.0, 2.0, 1.9, 1.7, 1.6, 1.4, 1.5, 1.9,
     2.2, 2.0, 1.5, 1.1, 1.0, 0.8, 0.5, 0.3, 0.1, 0.0]
)
# scikit-learn expects X with shape (n_samples, n_features),
# so X_train is reshaped to (20, 1).
X_train = np.linspace(0.0, 10.0, len(y_train)).reshape(-1, 1)
# Constant 1-sigma measurement uncertainty for every data point.
noise = 0.1 * np.ones_like(y_train)
# Dense grid of radii at which the GP prediction will be evaluated.
X_test = np.linspace(0, 10, 80).reshape(-1, 1)
# Plot the raw training data with its error bars.
fig, ax = plt.subplots(nrows=1)
# errorbar expects 1-D x values; X_train is (n_samples, 1) for scikit-learn,
# so flatten it for plotting (same .ravel() treatment as the fill_between
# call further down).
ax.errorbar(X_train.ravel(), y_train, yerr=noise, marker=".", ls="")
ax.set_xlabel(r"$r$")
ax.set_ylabel(r"$I_\nu$")
fig.savefig("data.png", dpi=300)
# Kernel: a constant amplitude prefactor times an RBF (bounded length scale),
# plus a white-noise term for the measurement scatter.
rbf = RBF(length_scale=1.0, length_scale_bounds=(1.0, 1e2))
kernel = 1 * rbf + WhiteKernel()
# The prior mean is assumed to be constant and zero (for normalize_y=False)
# or the training data's mean (for normalize_y=True).
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0, normalize_y=False)
# Calling predict() before fit() evaluates the GP prior at X_train.
y_mean, y_cov = gp.predict(X_train, return_std=False, return_cov=True)
# Visualize the (unfitted) prior mean against the training data.
fig, ax = plt.subplots(nrows=1)
# errorbar expects 1-D x; X_train is (n_samples, 1), so flatten it.
ax.errorbar(X_train.ravel(), y_train, yerr=noise, marker=".", ls="")
# The prior mean carries no yerr here, so draw it as a plain line.
ax.plot(X_train.ravel(), y_mean)
ax.set_xlabel(r"$r$")
ax.set_ylabel(r"$I_\nu$")
fig.savefig("prior-prediction.png", dpi=300)
# Image of the prior covariance matrix evaluated on the training grid.
fig, ax = plt.subplots(nrows=1)
ax.imshow(y_cov, interpolation="none")
fig.savefig("prior_cov.png", dpi=300)
# Fit: optimize the kernel hyperparameters against the training data.
gp.fit(X_test if False else X_train, y_train) if False else gp.fit(X_train, y_train)
# scikit-learn's predict() cannot return the covariance matrix and the
# standard deviation in a single call, so query the fitted GP twice
# over the test grid (return_std / return_cov default to False).
y_mean, y_cov = gp.predict(X_test, return_cov=True)
y_mean, y_std = gp.predict(X_test, return_std=True)
# Posterior prediction: 2-sigma band, training data, and the mean line.
fig, ax = plt.subplots(nrows=1)
ax.fill_between(
    X_test.ravel(),
    y_mean - 2 * y_std,
    y_mean + 2 * y_std,
    alpha=0.5,
    color="C1",
    label=r"$2\sigma$ confidence interval",
)
# errorbar/plot expect 1-D x; the (n, 1) design matrices are flattened.
ax.errorbar(X_train.ravel(), y_train, yerr=noise, marker=".", ls="", color="C0")
# Posterior mean has its uncertainty shown by the band, so a plain line suffices.
ax.plot(X_test.ravel(), y_mean, color="C1")
ax.set_xlabel(r"$r$")
ax.set_ylabel(r"$I_\nu$")
fig.savefig("post-prediction.png", dpi=300)
# Image of the posterior covariance matrix over the test grid.
fig, ax = plt.subplots(nrows=1)
ax.imshow(y_cov, interpolation="none")
fig.savefig("post_cov.png", dpi=300)
# Report the kernel with its optimized hyperparameters.
print("parameter values", gp.kernel_)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment