Skip to content

Instantly share code, notes, and snippets.

@eph
Last active January 23, 2021 18:16
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
Save eph/dac4bf1f9864f00fc555ce3de4e3b092 to your computer and use it in GitHub Desktop.
simple sequential monte carlo prototype
import pandas as p
from scipy.stats import multivariate_normal
import numpy as np
def tempering_schedule(type='tempering', lam=4.0, nstages=100):
    """Yield (phi_new, phi_old) temperature pairs for likelihood tempering.

    Parameters
    ----------
    type : str
        Schedule kind; only 'tempering' is implemented. (The original
        'adaptive' branch yielded None and then crashed on an undefined
        schedule array; it is rejected explicitly here.)
    lam : float
        Bending coefficient: larger values concentrate stages near phi = 0.
    nstages : int
        Number of tempering stages; the final temperature is exactly 1.0.

    Yields
    ------
    (float, float)
        phi_r = ((r + 1) / nstages) ** lam and the previous stage's
        temperature (0.0 for the first stage).
    """
    if type != 'tempering':
        raise NotImplementedError("only type='tempering' is supported")
    # Deterministic power schedule: phi_r for r = 0 .. nstages-1.
    lams = ((np.arange(nstages) + 1) / nstages) ** lam
    prev = 0.0
    for lam_r in lams:
        yield lam_r, prev
        prev = lam_r
class SMCState(object):
    """Particle-system state carried through a sequential Monte Carlo run."""

    def __init__(self, npart, npara):
        """Allocate arrays for `npart` particles of dimension `npara`.

        Parameters
        ----------
        npart : int
            Number of particles.
        npara : int
            Dimension of the parameter vector.
        """
        self.npart = npart
        self.npara = npara
        self.liksim = np.zeros(npart)            # log-likelihood per particle
        self.parasim = np.zeros((npart, npara))  # parameter draws (npart x npara)
        self.priorsim = np.zeros(npart)          # log-prior per particle
        self.wtsim = np.ones(npart) / npart      # normalized importance weights
        self.acpt = np.zeros(npart)              # last mutation acceptance flags
        self.logzt = 0.0                         # running log marginal likelihood
        self.c = 0.3                             # proposal scaling constant
        self.ess = npart                         # effective sample size
        self.nresamples = 0                      # number of resampling events

    def to_dataframe(self, paranames=None, resample=True):
        """Return particles, log-densities, and weights as a DataFrame.

        Parameters
        ----------
        paranames : list of str, optional
            Column names for the parameters; defaults to para00, para01, ...
        resample : bool
            Accepted for interface compatibility but currently unused.

        Returns
        -------
        pandas.DataFrame
            Columns: paranames + ['lik', 'prior', 'weight'].
        """
        if paranames is None:
            paranames = ['para{:02d}'.format(i) for i in range(self.npara)]
        data = np.c_[self.parasim, self.liksim, self.priorsim, self.wtsim]
        return p.DataFrame(data, columns=paranames + ['lik', 'prior', 'weight'])
from tqdm import tqdm
class SMCSampler(object):
    """Likelihood-tempering sequential Monte Carlo sampler.

    Runs the standard correction / selection / mutation cycle over a
    deterministic tempering schedule (see `tempering_schedule`).
    """

    def __init__(self, likelihood, prior, paranames=None,
                 tuning_schedule=(4.0, 500)):
        """Store the model and tuning constants.

        Parameters
        ----------
        likelihood : callable
            Maps a parameter vector to its log-likelihood.
        prior : object
            Exposes `.npara`, `.rvs(size=...)`, and `.logpdf(para)`.
        paranames : list of str, optional
            Accepted for interface compatibility; currently unused here.
        tuning_schedule : (float, int)
            (lam, nstages) for the tempering schedule.
        """
        self.likelihood = likelihood
        self.prior = prior
        self.tuning_schedule = tuning_schedule
        self.nstages = tuning_schedule[1]
        self.verbose = True

    def initialize(self, npart=5000):
        """Draw the initial particle cloud from the prior and score it."""
        state = SMCState(npart=npart, npara=self.prior.npara)
        state.parasim = self.prior.rvs(size=npart)
        priors = (self.prior.logpdf(para) for para in state.parasim)
        likelihoods = (self.likelihood(para) for para in state.parasim)
        if self.verbose:
            likelihoods = tqdm(likelihoods, total=npart)
            # BUG FIX: the priors progress bar was attached unconditionally,
            # ignoring self.verbose.
            priors = tqdm(priors, total=npart)
        for i, l in enumerate(likelihoods):
            state.liksim[i] = l
        for i, l in enumerate(priors):
            state.priorsim[i] = l
        return state

    def estimate(self, npart=5000, parallel=False):
        """Run the full sampler and return the final SMCState."""
        state = self.initialize(npart=npart)
        # BUG FIX: lam was hard-coded to 4.0; use the tuning schedule so a
        # non-default bending coefficient actually takes effect.
        stages = tempering_schedule(lam=self.tuning_schedule[0],
                                    nstages=self.nstages)
        if self.verbose:
            stages = tqdm(stages, total=self.nstages)
        for temp, temp_old in stages:
            if self.verbose:
                # BUG FIX: set_description exists only on the tqdm wrapper;
                # calling it on the bare generator raised AttributeError.
                stages.set_description(
                    'φ = %4.2f [%i resamples, avg. lik = %5.2f, pos = %5.2f, acpt = %4.2f]'
                    % (temp, state.nresamples, state.liksim.mean(),
                       state.liksim.mean() + state.priorsim.mean(),
                       state.acpt.mean()))
            state = self.iterate(state, temp=temp, temp_old=temp_old,
                                 parallel=parallel)
        return state

    def iterate(self, state, temp=1.0, temp_old=0.0, parallel=False):
        """One SMC stage: reweight, resample if ESS is low, then mutate."""
        delta_phi = temp - temp_old
        state = self._correction(state, delta_phi=delta_phi)
        if state.ess < state.npart / 2:
            state = self._selection(state)
        state = self._mutation(state, temp=temp, parallel=parallel)
        return state

    def _correction(self, state, delta_phi=1.0):
        """Reweight particles by the incremental tempered likelihood."""
        # Log-sum-exp stabilization: factor out the max log-likelihood so
        # np.exp cannot overflow; the constant is restored in logzt below.
        m = state.liksim.max()
        kernel = np.exp((state.liksim - m) * delta_phi) * state.wtsim
        incwt = kernel.sum()
        state.wtsim = kernel / incwt
        state.logzt += m * delta_phi + np.log(incwt)
        state.ess = 1.0 / (state.wtsim ** 2).sum()
        return state

    def _selection(self, state, scheme='multinomial'):
        """Resample particles according to their weights (multinomial only)."""
        if scheme == 'multinomial':
            counts = np.random.multinomial(state.npart, state.wtsim)
            inds = np.repeat(range(state.npart), counts)
            state.parasim = state.parasim[inds]
            state.liksim = state.liksim[inds]
            state.priorsim = state.priorsim[inds]
            state.wtsim = np.ones(state.npart) / state.npart
            state.ess = state.npart
            state.nresamples += 1
        return state

    def _mutation(self, state, temp=1.0, parallel=False):
        """One random-walk Metropolis step per particle at temperature `temp`."""
        # Weighted mean and covariance of the cloud drive the proposal scale.
        mu = np.einsum('i,ij->j', state.wtsim, state.parasim)
        demeaned = state.parasim - mu
        sigma = np.einsum('i,ij->ij', state.wtsim, demeaned)
        sigma = state.c ** 2 * sigma.T.dot(demeaned)
        eps = np.random.multivariate_normal(mean=np.zeros(state.npara),
                                            cov=sigma,
                                            size=state.npart)
        u = np.random.rand(state.npart)
        acpt = np.zeros(state.npart)
        for i in range(state.npart):
            para0 = state.parasim[i]
            lik0 = state.liksim[i]
            pr0 = state.priorsim[i]
            para1 = para0 + eps[i]
            lik1 = self.likelihood(para1)
            pr1 = self.prior.logpdf(para1)
            alp = np.exp(temp * (lik1 - lik0) + pr1 - pr0)
            if u[i] < alp:
                state.parasim[i] = para1
                state.liksim[i] = lik1
                state.priorsim[i] = pr1
                acpt[i] = 1
        # BUG FIX: the adaptive scaling previously read the local `acpt`
        # array that was never written (state.acpt[i] was set instead), so
        # the mean was always 0 and c shrank at every stage.
        state.acpt = acpt
        if acpt.mean() > 0.25:
            state.c = state.c * 1.1
        else:
            state.c = state.c * 0.9
        return state
class HamiltonianSMCSampler(SMCSampler):
    """SMC sampler whose mutation step is Hamiltonian Monte Carlo."""

    def __init__(self, *args, **kwargs):
        """Same signature as SMCSampler plus a required `gradient` keyword:
        the gradient of the log-likelihood, callable para -> ndarray."""
        grad_loglik = kwargs.pop('gradient')
        super().__init__(*args, **kwargs)
        self.grad_loglik = grad_loglik
        # NOTE(review): self.c is never read — _mutation adapts state.c,
        # which SMCState initializes to 0.3; confirm intent before removing.
        self.c = 0.01

    def _mutation(self, state, temp=1.0, parallel=False):
        """One HMC (leapfrog + Metropolis) step per particle."""
        mu = np.einsum('i,ij->j', state.wtsim, state.parasim)
        demeaned = state.parasim - mu
        sigma = np.einsum('i,ij->ij', state.wtsim, demeaned)
        sigma = sigma.T.dot(demeaned)
        # BUG FIX: the momentum reference density was hard-coded to two
        # dimensions (np.eye(2)); use the actual parameter dimension.
        pdfp = multivariate_normal(cov=np.eye(state.npara))
        # Hoist the Cholesky factor out of the particle loop (invariant).
        sigma_chol = np.linalg.cholesky(sigma)
        u = np.random.rand(state.npart)
        acpt = np.zeros(state.npart)
        for i in range(state.npart):
            para0 = state.parasim[i]
            lik0 = state.liksim[i]
            pr0 = state.priorsim[i]
            # BUG FIX: momentum must be Gaussian (randn) to match the normal
            # density used in the accept ratio; the original drew uniform
            # rand() variates.
            # NOTE(review): p0 is scaled by chol(sigma) while the accept
            # ratio uses a standard-normal density — confirm this mismatch
            # against the intended kinetic energy before relying on results.
            p0 = sigma_chol @ np.random.randn(state.npara)
            para1, p1 = self.Q(para0.copy(), p0.copy(), eps=state.c, L=10,
                               psi=temp)
            lik1 = self.likelihood(para1)
            pr1 = self.prior.logpdf(para1)
            u0 = pdfp.logpdf(p0)
            u1 = pdfp.logpdf(p1)
            alp = np.exp(temp * (lik1 - lik0) + pr1 - pr0 + u1 - u0)
            if u[i] < alp:
                state.parasim[i] = para1
                state.liksim[i] = lik1
                state.priorsim[i] = pr1
                acpt[i] = 1
        # BUG FIX: record acceptances on the state (previously left at zero,
        # so estimate() always reported acpt = 0) and removed the debug
        # print(lik1, lik0) from the hot loop.
        state.acpt = acpt
        if acpt.mean() > 0.65:
            state.c = state.c * 1.1
        else:
            state.c = state.c * 0.9
        return state

    def Q(self, theta, p, psi=1, eps=0.01, L=10):
        """Leapfrog integrator: L steps of size eps on the log-likelihood
        tempered by psi; returns (theta_new, -p_new), the momentum negated
        to make the proposal reversible."""
        for _ in range(L):
            p = p + eps * psi * self.grad_loglik(theta) / 2.
            theta = theta + eps * p
            p = p + eps * psi * self.grad_loglik(theta) / 2.
        return theta, -p
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment