KL for scalar Gaussian mixture
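Three ways of estimating, from a single reparameterised sample x = mu + sigma * epsilon, the KL divergence KL(q || p) between a fully factorised Gaussian posterior q(x) = N(x; mu, sigma^2) and a scale-mixture prior p(x) = prior_pi * N(x; 0, sigma1^2) + (1 - prior_pi) * N(x; 0, sigma2^2), via the single-sample estimator KL(q || p) ≈ log q(x) - log p(x). v2 (log space) and v3 (probability space) apply the mixture per element and should agree up to numerical error; v1 applies it per row and therefore computes a different quantity.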
import math
import numpy as np
import tensorflow as tf
# Scale-mixture prior hyperparameters (two zero-mean components)
log_sigma1 = 0.
log_sigma2 = -0.5
prior_pi = 0.25
sigma1 = math.exp(log_sigma1)
sigma2 = math.exp(log_sigma2)
# Variational posterior parameters: elementwise mean and standard deviation
mu = np.random.normal(0, 1, (200, 200)).astype(np.float32)
sigma = 1 + np.random.uniform(0, 1, (200, 200)).astype(np.float32)
def compute_KL_v1(x, mu, sigma):
    """
    Single-sample KL estimate: log q(x) - log p(x), with the mixture applied
    per row: each row's log-probabilities are summed before the log-sum-exp,
    so a whole row is treated as drawn from a single mixture component.
    This is not the per-element mixture of v2/v3.
    """
    posterior = tf.contrib.distributions.Normal(mu, sigma)
    KL = tf.reduce_sum(posterior.log_prob(tf.reshape(x, [-1])))
    N1 = tf.contrib.distributions.Normal(0.0, sigma1)
    N2 = tf.contrib.distributions.Normal(0.0, sigma2)
    # Row-level log joint under each component, plus the log mixture weight
    mix1 = tf.reduce_sum(N1.log_prob(x), 1) + tf.log(prior_pi)
    mix2 = tf.reduce_sum(N2.log_prob(x), 1) + tf.log(1.0 - prior_pi)
    prior_mix = tf.stack([mix1, mix2])
    KL += -tf.reduce_sum(tf.reduce_logsumexp(prior_mix, [0]))
    return KL
def compute_KL_v2(x, mu, sigma):
    """
    Single-sample KL estimate: log q(x) - log p(x), with the mixture applied
    per element in log space via log-sum-exp (numerically stable).
    """
    x_flat = tf.reshape(x, [-1])
    posterior = tf.contrib.distributions.Normal(mu, sigma)
    log_posterior = tf.reduce_sum(posterior.log_prob(x_flat))
    N1 = tf.contrib.distributions.Normal(0.0, tf.exp(log_sigma1))
    N2 = tf.contrib.distributions.Normal(0.0, tf.exp(log_sigma2))
    # Elementwise log of pi * N1 + (1 - pi) * N2, via log-sum-exp
    prior1 = tf.log(prior_pi) + N1.log_prob(x_flat)
    prior2 = tf.log(1.0 - prior_pi) + N2.log_prob(x_flat)
    prior_mix = tf.stack([prior1, prior2])
    log_prior = tf.reduce_sum(tf.reduce_logsumexp(prior_mix, [0]))
    return log_posterior - log_prior
def compute_KL_v3(x, mu, sigma):
    """
    Single-sample KL estimate: log q(x) - log p(x), with the mixture applied
    per element in probability space. Mathematically equivalent to v2, but
    exponentiating the log-probabilities can underflow in float32.
    """
    x_flat = tf.reshape(x, [-1])
    posterior = tf.contrib.distributions.Normal(mu, sigma)
    log_posterior = tf.reduce_sum(posterior.log_prob(x_flat))
    N1 = tf.contrib.distributions.Normal(0.0, tf.exp(log_sigma1))
    N2 = tf.contrib.distributions.Normal(0.0, tf.exp(log_sigma2))
    # Elementwise pi * N1 + (1 - pi) * N2, then take the log
    prior1 = prior_pi * tf.exp(N1.log_prob(x_flat))
    prior2 = (1.0 - prior_pi) * tf.exp(N2.log_prob(x_flat))
    prior = prior1 + prior2
    log_prior = tf.reduce_sum(tf.log(prior))
    return log_posterior - log_prior
X_mu = tf.placeholder(tf.float32, shape=[200, 200])
X_sigma = tf.placeholder(tf.float32, shape=[200, 200])
# Reparameterisation trick: x = mu + sigma * epsilon, epsilon ~ N(0, 1)
epsilon = tf.contrib.distributions.Normal(0.0, 1.0).sample([200, 200])
X = X_mu + epsilon * X_sigma
KL1 = compute_KL_v1(X,
                    tf.reshape(X_mu, [-1]),
                    tf.reshape(X_sigma, [-1]))
KL2 = compute_KL_v2(X,
                    tf.reshape(X_mu, [-1]),
                    tf.reshape(X_sigma, [-1]))
KL3 = compute_KL_v3(tf.reshape(X, [-1]),
                    tf.reshape(X_mu, [-1]),
                    tf.reshape(X_sigma, [-1]))
sess = tf.Session()
# Within a single sess.run, epsilon is sampled once, so all three estimates
# share the same sample; separate calls draw fresh samples.
kl1, kl2, kl3 = sess.run([KL1, KL2, KL3], {X_mu: mu, X_sigma: sigma})
print("KL v1:", kl1)
print("KL v2:", kl2)
print("KL v3:", kl3)
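As a sanity check (not part of the original gist), the same single-sample estimate can be recomputed in plain NumPy under the per-element mixture prior of v2/v3; fetching X together with KL2 in one sess.run call reuses the same epsilon sample, so the two numbers should agree up to float32 rounding. The helper np_log_mixture is new here.

def np_log_mixture(x, mix_pi, s1, s2):
    """Elementwise log-density of the two-component scale mixture."""
    log_n1 = -0.5 * np.log(2.0 * np.pi) - np.log(s1) - x ** 2 / (2.0 * s1 ** 2)
    log_n2 = -0.5 * np.log(2.0 * np.pi) - np.log(s2) - x ** 2 / (2.0 * s2 ** 2)
    a = np.log(mix_pi) + log_n1
    b = np.log(1.0 - mix_pi) + log_n2
    m = np.maximum(a, b)  # pairwise log-sum-exp for stability
    return m + np.log(np.exp(a - m) + np.exp(b - m))

x_val, kl2_val = sess.run([X, KL2], {X_mu: mu, X_sigma: sigma})
log_prior_np = np_log_mixture(x_val, prior_pi, sigma1, sigma2).sum()
log_post_np = (-0.5 * np.log(2.0 * np.pi) - np.log(sigma)
               - (x_val - mu) ** 2 / (2.0 * sigma ** 2)).sum()
print("NumPy check:", log_post_np - log_prior_np, "vs KL2:", kl2_val)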