@jhaberstro
Created September 23, 2017 16:46
Variational inference by evolutionary optimization for a simple Gaussian model
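The script below fits the mean of a Gaussian variational distribution q(z) = N(mu, 0.1) to the posterior of a conjugate Gaussian location model (prior N(0, 1), likelihood N(z, 0.1) per observation) by minimizing a Monte Carlo estimate of the negative ELBO. Rather than differentiating through the ELBO, the gradient with respect to mu is estimated with the evolution-strategies (Gaussian-smoothing) identity

    d/dmu E[f(mu + s * eps)] = E[f(mu + s * eps) * eps] / s,   eps ~ N(0, 1),

which needs only black-box evaluations of the negative ELBO.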
import numpy as np
from scipy.stats import norm
from math import log
# Synthetic data: N draws from the true model N(true_loc, true_stddev^2).
N = 1000
true_loc = 10.0
true_stddev = 0.1
x_data = true_loc + (np.random.randn(N) * true_stddev)
def lognormalpdf(x, loc, scale):
    # log N(x | loc, scale) = -0.5*log(2*pi*sigma^2) - (x - mu)^2 / (2*sigma^2)
    return -0.5 * log(2 * np.pi * scale ** 2) - (x - loc) ** 2 / (2.0 * scale ** 2)
def neg_elbo(x):
    # Monte Carlo estimate of the negative ELBO for a variational mean x, with
    # q(z) = N(x, true_stddev), prior p(z) = N(0, 1), and likelihood
    # N(z, true_stddev) per observation.
    n_samples = 50
    q_log_prob = np.zeros(n_samples)
    p_log_prob = np.zeros(n_samples)
    for i in range(n_samples):
        z = norm.rvs(loc=x, scale=true_stddev)
        q_log_prob[i] = lognormalpdf(z, loc=x, scale=true_stddev)
        p_log_prob[i] = lognormalpdf(z, loc=0.0, scale=1.0)
        # Loop variable renamed from x to xi: the original shadowed the
        # variational mean x, so later z samples were drawn around a data point.
        for xi in x_data:
            p_log_prob[i] += lognormalpdf(xi, loc=z, scale=true_stddev)
    return -np.mean(p_log_prob - q_log_prob)
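# Sanity-check helper (added sketch, not in the original gist): this
# prior/likelihood pair is conjugate, so the exact posterior over z is Gaussian
# with precision 1/prior_stddev^2 + N/noise_stddev^2 and a closed-form mean.
# The optimized variational mean should land close to this value (~10 here).
def analytic_posterior_mean(data, prior_loc=0.0, prior_stddev=1.0, noise_stddev=true_stddev):
    precision = 1.0 / prior_stddev ** 2 + len(data) / noise_stddev ** 2
    return (prior_loc / prior_stddev ** 2 + np.sum(data) / noise_stddev ** 2) / precision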
def gradients(est, n_samples=100, stddev=1):
    # Evolution-strategies gradient estimate of the negative ELBO:
    # grad ~= sum_i f(est + stddev * eps_i) * eps_i / (n_samples * stddev).
    noise = np.random.standard_normal(size=n_samples)
    fs = np.zeros(shape=n_samples)
    print("neg_elbo: " + str(neg_elbo(est)))
    for i in range(n_samples):
        fs[i] = neg_elbo(est + (stddev * noise[i]))
    # fs = (fs - np.mean(fs)) / np.std(fs)  # optional fitness shaping
    # The trailing 0.01 is an extra scale factor baked into the estimate.
    return (np.sum(fs * noise) / (n_samples * stddev)) * 0.01
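# Quick check of the estimator (added sketch, not in the original gist): for
# f(t) = t^2 the smoothing identity gives exactly 2*t, so this should print a
# value near 6 at t = 3, up to Monte Carlo noise. Note it omits the 0.01
# scaling that gradients() folds into its return value.
_eps = np.random.standard_normal(10000)
print("ES gradient check (expect ~6): " + str(np.sum((3.0 + 0.1 * _eps) ** 2 * _eps) / (10000 * 0.1)))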
def optimize(initial_est, step_size=0.01, n_steps=10, n_samples=100):
    # Plain gradient descent on the negative ELBO using the ES gradient estimate.
    est = initial_est
    for i in range(n_steps):
        grad = gradients(est, n_samples=n_samples)
        print("grad: " + str(grad))
        print("est: " + str(est))
        est -= step_size * grad
    return est
print(optimize(0, n_steps=1000, n_samples=20))
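# For comparison (uses the hypothetical analytic_posterior_mean helper above):
# the exact posterior mean, which the optimized estimate should approach (~10).
print("analytic posterior mean: " + str(analytic_posterior_mean(x_data)))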