Approximating a uniform distribution with a normal distribution by maximum-likelihood estimation in TensorFlow 1.x
"""
Created on Tue Mar 13 19:17:39 2018
@author: aidanrocke
"""
import tensorflow as tf
import numpy as np
def normal_approximation(a, b):
    """Fit a normal distribution to samples from Uniform(a, b) by minimizing
    the negative log-likelihood with Adam (TensorFlow 1.x API)."""
    # data: placeholder for the observed samples
    x = tf.placeholder(dtype=tf.float32)

    INIT_MU_PARAMS = {'loc': 0.0, 'scale': 0.1, 'size': 1}
    INIT_LOG_SIGMA_PARAMS = {'loc': 1.0, 'scale': 0.1, 'size': 1}
    RANDOM_SEED = 0

    # params: mu and log(sigma) are the trainable variables; optimizing
    # log(sigma) keeps sigma strictly positive
    np.random.seed(RANDOM_SEED)
    mu = tf.Variable(initial_value=np.random.normal(**INIT_MU_PARAMS),
                     dtype=tf.float32)
    log_sigma = tf.Variable(initial_value=np.random.normal(**INIT_LOG_SIGMA_PARAMS),
                            dtype=tf.float32)
    sigma = tf.exp(log_sigma)

    # loss: negative log-likelihood of the samples under N(mu, sigma)
    gaussian_dist = tf.contrib.distributions.Normal(loc=mu, scale=sigma)
    log_prob = gaussian_dist.log_prob(value=x)
    neg_log_likelihood = -1.0 * tf.reduce_sum(log_prob)

    # gradient of the loss with respect to the trainable parameters
    grad = tf.gradients(neg_log_likelihood, [mu, log_sigma])

    LEARNING_RATE = 0.01
    MAX_ITER = 10000
    TOL_PARAM, TOL_LOSS, TOL_GRAD = 1e-6, 1e-6, 1e-6

    # optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
    train_op = optimizer.minimize(loss=neg_log_likelihood)

    # observed data: 10,000 samples from Uniform(a, b)
    x_obs = np.random.uniform(low=a, high=b, size=(10000, 1))

    with tf.Session() as sess:
        # initialize
        sess.run(fetches=tf.global_variables_initializer())
        i = 1
        obs_mu, obs_log_sigma, obs_sigma = sess.run(fetches=[[mu], [log_sigma], [sigma]])
        obs_loss = sess.run(fetches=[neg_log_likelihood], feed_dict={x: x_obs})
        obs_grad = sess.run(fetches=[grad], feed_dict={x: x_obs})

        while True:
            # gradient step
            sess.run(fetches=train_op, feed_dict={x: x_obs})

            # update parameters
            new_mu, new_log_sigma, new_sigma = sess.run(fetches=[mu, log_sigma, sigma])
            diff_norm = np.linalg.norm(np.subtract([new_mu, new_log_sigma],
                                                   [obs_mu[-1], obs_log_sigma[-1]]))

            # update loss
            new_loss = sess.run(fetches=neg_log_likelihood, feed_dict={x: x_obs})
            loss_diff = np.abs(new_loss - obs_loss[-1])

            # update gradient
            new_grad = sess.run(fetches=grad, feed_dict={x: x_obs})
            grad_norm = np.linalg.norm(new_grad)

            # record the optimization trajectory
            obs_mu.append(new_mu)
            obs_log_sigma.append(new_log_sigma)
            obs_sigma.append(new_sigma)
            obs_loss.append(new_loss)
            obs_grad.append(new_grad)

            # stop on parameter, loss or gradient convergence, or at MAX_ITER
            if diff_norm < TOL_PARAM:
                print('Parameter convergence in {} iterations!'.format(i))
                break
            if loss_diff < TOL_LOSS:
                print('Loss function convergence in {} iterations!'.format(i))
                break
            if grad_norm < TOL_GRAD:
                print('Gradient convergence in {} iterations!'.format(i))
                break
            if i >= MAX_ITER:
                print('Max number of iterations reached without convergence.')
                break
            i += 1

    print("The estimated mean is {} and estimated standard deviation is {}".format(
        obs_mu[-1], obs_sigma[-1]))