@thomaspinder
Created February 11, 2019 17:57
Black-Box Variational Inference in Julia (Work in Progress!), based upon https://github.com/daeilkim/bbvi
using Distributions, Random, LinearAlgebra, Plots, StatsPlots  # StatsPlots (formerly StatPlots) provides marginalhist
Random.seed!(123);
# Prior over the latent z: N(mu_prior, inv(pre_prior))
dims = 2
precision_prior = 4
cov_prior = Matrix{Float64}(I, dims, dims)
pre_prior = precision_prior * cov_prior
mu_prior = [-2.0, -4.0]
# Observation noise: precision 0.5 per dimension, i.e. covariance 2I
precision_t = 0.5
likelihood_pre = Matrix{Float64}(I, dims, dims) * precision_t
likelihood_cov = inv(likelihood_pre)
# Draw latent samples from the prior, then simulate observations y_i ~ N(transpose(trans_mat) * z_i, likelihood_cov)
n_samples = 2000
mvn = MvNormal(mu_prior, inv(pre_prior))
gaussian_samples = transpose(rand(mvn, n_samples));  # n_samples × dims, one latent sample per row
trans_mat = [2.1 1.4; 1.0 2.3]  # linear map applied to the latent z
y = zeros(Float64, n_samples, dims);
for i in 1:n_samples
    mv_dist = MvNormal(transpose(trans_mat) * gaussian_samples[i, :], likelihood_cov)
    y[i, :] = rand(mv_dist)
end
# Plot the simulated observations with their marginal histograms
marginalhist(y[:,1], y[:,2], fc=:plasma, bins=40)
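# Illustrative check (sketch): the empirical mean of the simulated y should sit close to
# its analytic expectation transpose(trans_mat) * mu_prior.
println("empirical mean of y: ", vec(mean(y, dims=1)),
        "  expected: ", transpose(trans_mat) * mu_prior)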
# Analytic Gaussian posterior for reference:
#   Σ_post = inv(Λ_prior + n Λ_lik),  μ_post = Σ_post (Λ_prior μ_prior + n Λ_lik ȳ)
posterior_cov = inv(pre_prior + (n_samples * likelihood_pre))
yhat = vec(mean(y, dims=1))
posterior_mu = posterior_cov * (pre_prior * mu_prior + n_samples * likelihood_pre * yhat)
z = rand(MvNormal(posterior_mu, posterior_cov), n_samples)
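# Illustrative check (sketch): draws from the analytic posterior should average to posterior_mu.
println("sample mean of analytic-posterior draws: ", vec(mean(z, dims=2)),
        "  posterior_mu: ", posterior_mu)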
# Gradient of log q(z) = log N(z; λ, Σ) with respect to the mean λ: Σ⁻¹ (z − λ)
function q_grad(z, lambda, precision)
    return precision * (z - lambda)
end
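# Illustrative check (sketch, not part of the BBVI routine itself): q_grad should agree
# with a finite-difference gradient of logpdf(MvNormal(λ, Σ), z) with respect to λ,
# since ∇_λ log N(z; λ, Σ) = Σ⁻¹ (z − λ).
let Σ = Matrix{Float64}(I, dims, dims), λ = [0.5, -0.5], zt = [1.0, 2.0], ϵ = 1e-6
    numeric = map(1:dims) do k
        λϵ = copy(λ); λϵ[k] += ϵ
        (logpdf(MvNormal(λϵ, Σ), zt) - logpdf(MvNormal(λ, Σ), zt)) / ϵ
    end
    @assert isapprox(q_grad(zt, λ, inv(Σ)), numeric; atol=1e-4)
end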
# Log joint density log p(z) + Σᵢ log p(yᵢ | z), with the prior on z centred at `lambda`.
function log_joint(y, z, lambda, prior_cov, likelihood_cov)
    p_prior = logpdf(MvNormal(lambda, prior_cov), z)
    p_likelihood = 0.0
    for i in 1:size(y, 1)  # one observation per row of y
        p_likelihood += logpdf(MvNormal(z, likelihood_cov), y[i, :])
    end
    return p_prior + p_likelihood
end
# Sanity check: posterior log-density evaluated at a batch of points (one point per column)
u = Float64[2 3 4 3 4 3 3 4; 2 3 4 3 4 3 3 4]
logpdf(MvNormal(posterior_mu, posterior_cov), u)
# Main BBVI routine (work in progress): tracks the variational mean λ over iterations.
function bbvi(lam, lam_true, y, cov_prior, likelihood_cov, posterior_cov, nits=100, dims=2)
    lam_prev = copy(lam)                     # previous iterate (for the convergence check, not used yet)
    lam_time = zeros(Float64, nits, dims)    # trace of λ over iterations
    lam_diff = 10                            # change in λ between successive iterations
    true_diff = 10                           # distance of λ from the known optimum lam_true
    t = 1
    n_samples = 25                           # Monte Carlo samples per gradient estimate
    # Robbins-Monro learning-rate constants: ρ_t = (t + tau)^(-kappa)
    kappa = 0.99
    tau = 10
    while t < nits
        lam_time[t, :] = lam
        # TODO: apply the Robbins-Monro update to λ here (see the sketch below)
        t = t + 1
    end
    return lam_time
end
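# Sketch of the missing update (assumed, not from the original gist): a single Robbins-Monro
# step λ ← λ + ρ_t * ĝ, where ĝ is the Monte Carlo ELBO-gradient estimate computed by
# `robbins_monroe` below and ρ_t = (t + tau)^(-kappa). `bbvi_step` is a hypothetical helper name.
function bbvi_step(lam, t, y, prior_cov, likelihood_cov, posterior_cov; n_samples=25, kappa=0.99, tau=10)
    grad_hat = robbins_monroe(lam, length(lam), y, n_samples, prior_cov, likelihood_cov, posterior_cov)
    rho = (t + tau)^(-kappa)
    return lam .+ rho .* vec(grad_hat)
end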
# Monte Carlo estimate of the ELBO gradient with respect to the variational mean μ:
# average of score × (log joint − log q) over sampled latent values z.
function robbins_monroe(μ, D, y, n_samples, prior_cov, likelihood_cov, posterior_cov)
    # D is the dimension of the latent space, e.g. 2.
    z_dist = MvNormal(μ, Matrix{Float64}(I, D, D) * 2)
    z_samples = rand(z_dist, n_samples)  # D × n_samples, one sample per column
    f_vals = zeros(Float64, n_samples, D)
    h_vals = zeros(Float64, n_samples, D)
    for i in 1:n_samples
        zi = z_samples[:, i]
        # Log joint with the prior on z centred at mu_prior
        joint = log_joint(y, zi, mu_prior, prior_cov, likelihood_cov)
        entropy_dist = MvNormal(μ, posterior_cov)
        entropy = logpdf(entropy_dist, zi)
        # Score of the variational density w.r.t. its mean: Σ⁻¹ (z − μ)
        grad = q_grad(zi, μ, inv(posterior_cov))
        f_vals[i, :] = grad * (joint - entropy)
        h_vals[i, :] = grad  # kept for a control-variate correction (not used yet)
    end
    # Monte Carlo average of the per-sample gradient estimates
    grad_estimate = mean(f_vals, dims=1)
    return grad_estimate
end
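# Illustrative call (sketch): one noisy gradient estimate at the prior mean, on a small
# subset of the data. The plain score-function estimator is very high-variance, so
# individual estimates can be large.
grad_example = robbins_monroe(mu_prior, dims, y[1:20, :], 10, inv(pre_prior), likelihood_cov, posterior_cov)
println("example ELBO-gradient estimate: ", vec(grad_example))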
# Run the (work-in-progress) BBVI routine on the simulated observations
lam_trace = bbvi([1.0, 1.0], [2.0, 2.0], y, cov_prior, likelihood_cov, posterior_cov)