MOGP with LCM: a multi-output Gaussian process with a linear coregionalization model (LCM) in PyMC3
import os
import theano
import pymc3 as pm
import numpy as np
import pandas as pd
import theano.tensor as tt
import warnings
warnings.filterwarnings('ignore')
def row_update(col, row, matrix, vector):
    # Fill position (row, col) with an entry of `vector` if it lies on or below
    # the diagonal; otherwise keep the existing matrix entry.
    # Note: the index `col + row` only enumerates the lower triangle correctly
    # for the 2 x 2 case used below.
    return tt.switch(tt.lt(col, row + 1), vector[col + row], matrix[row, col])


def col_update(row, matrix, vector):
    # Scan over the columns of a single row.
    res, _ = theano.scan(
        fn=row_update, sequences=tt.arange(matrix.shape[1]), non_sequences=[row, matrix, vector]
    )
    return res
def matrix_update(matrix, vector):
    # Build the lower-triangular matrix symbolically (instead of compiling a
    # theano.function), so that the PyMC3 random variable `theta` can be passed
    # in directly as `vector`.
    matrix = tt.as_tensor_variable(matrix)
    vector = tt.as_tensor_variable(vector)
    results, _ = theano.scan(
        fn=col_update, sequences=tt.arange(matrix.shape[0]), non_sequences=[matrix, vector]
    )
    return results
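
# Illustrative sanity check (not part of the original gist): filling a 2 x 2
# zero matrix with the vector [1, 2, 3] should give the lower-triangular
# matrix [[1, 0], [2, 3]].
example_L = matrix_update(np.zeros((2, 2)), np.array([1., 2., 3.])).eval()
print(example_L)  # expected: [[1. 0.] [2. 3.]]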
# These functions generate noisy data for the two outputs
f_output1 = lambda x: 4. * np.cos(x/5.) - .4*x - 35. + np.random.rand(x.size)[:,None] * 2.
f_output2 = lambda x: 6. * np.cos(x/5.) + .2*x + 35. + np.random.rand(x.size)[:,None] * 8.
#{X,Y} training set for each output
X1 = np.random.rand(75)[:, None] * 75
X2 = np.random.rand(100)[:, None] * 100
Y1 = f_output1(X1)
Y2 = f_output2(X2)
#{X,Y} test set for each output
Xt1 = np.random.rand(100)[:,None]*100
Xt2 = np.random.rand(100)[:,None]*100
Yt1 = f_output1(Xt1)
Yt2 = f_output2(Xt2)
# Stack the two data sets: column 0 of Xs is the output label (0 or 1), column 1 is the input location
labels = 2
X1_label = np.hstack(((np.ones_like(X1) * 0), X1))
X2_label = np.hstack(((np.ones_like(X2) * 1), X2))
Xs = np.vstack((X1_label, X2_label))
Ys = np.vstack((Y1, Y2)).flatten()
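
# Illustrative check of the stacked training data (not part of the original
# gist): 75 + 100 = 175 rows, with the output label in column 0 of Xs.
assert Xs.shape == (175, 2)
assert Ys.shape == (175,)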
# Build and fit the multi-output GP model
with pm.Model() as model:
    # START attempt to make A = L * L.T
    # theta holds the entries of the lower-triangular matrix L.
    theta = pm.Gamma('theta', alpha=1., beta=.2, shape=int(labels * (labels + 1) / 2))
    L = matrix_update(np.zeros((labels, labels)), theta)
    kappa = pm.Gamma('kappa', alpha=1., beta=.2, shape=labels)
    # Coregion builds B = W W.T + diag(kappa) over the label column (active_dims=[0]).
    cov_coregionalization = pm.gp.cov.Coregion(2, W=L, kappa=kappa, active_dims=[0])
    # END attempt to make A = L * L.T
    # Squared-exponential kernel over the input column (active_dims=[1]).
    length_scale = pm.Gamma('length_scale', alpha=4., beta=.1)
    scale = pm.HalfCauchy('scale', beta=3., testval=2.)
    cov_data = scale ** 2 * pm.gp.cov.ExpQuad(2, ls=length_scale, active_dims=[1])
    cov = cov_coregionalization * cov_data
    gp = pm.gp.Marginal(cov_func=cov)
    sigma = pm.HalfNormal('sigma', sd=2)
    y_ = gp.marginal_likelihood('y', X=Xs, y=Ys, noise=sigma)
    # Fit with ADVI and draw samples from the fitted approximation.
    approx = pm.ADVI().fit(n=50000)
    trace = approx.sample(2500)
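
# A sketch of prediction at the held-out test inputs defined above (Xt1, Xt2
# are otherwise unused). The variable name 'f_pred' and the 200 posterior
# predictive samples are choices made here, not part of the original gist;
# newer PyMC3 releases use `var_names=` instead of `vars=`.
Xt1_label = np.hstack((np.zeros_like(Xt1), Xt1))
Xt2_label = np.hstack((np.ones_like(Xt2), Xt2))
Xt = np.vstack((Xt1_label, Xt2_label))
with model:
    f_pred = gp.conditional('f_pred', Xnew=Xt)
    pred_samples = pm.sample_posterior_predictive(trace, vars=[f_pred], samples=200)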