@huynhnguyen
Last active June 1, 2017 05:57
open journal (3/15/2017): tank estimation problem with Edward
'''
An Edward implementation of the German tank problem: estimate the total
number of tanks from a small sample of observed serial numbers.
link: https://en.wikipedia.org/wiki/German_tank_problem
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Uniform, Empirical
tf.reset_default_graph()
ed.set_seed(42)
# DATA
# Observed serial numbers of four captured tanks.
y = np.array([10, 256, 202, 97], dtype=np.float32)
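# Sanity check: the classical minimum-variance unbiased estimate from the
# Wikipedia article above is m + m/k - 1, where m is the largest observed
# serial number and k is the sample size.
m, k = y.max(), len(y)
print("Frequentist MVUE estimate:", m + m / k - 1)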
# MODEL
# Prior: all we know is that N lies somewhere between the largest observed
# serial number and 10,000 (an assumed upper bound on the number of tanks).
N = Uniform(low=y.max(), high=10000.0)
# Likelihood: each of the four observed serial numbers is drawn uniformly from [0, N].
y_obs = Uniform(low=0.0, high=tf.ones(4) * N)
# Define 100,000 steps to run the Metropolis-Hastings algorithm.
S = 100000
# Empirical approximation holding the S states of the Markov chain;
# start the chain at y.max(), the smallest value the prior allows.
qN = Empirical(params=tf.Variable(tf.ones([S]) * y.max()))
# Proposal: at each step, propose a new N drawn uniformly from [y.max(), 10000].
proposal_qN = Uniform(low=tf.Variable(y.max(), dtype=tf.float32),
                      high=10000.0)
# INFERENCE
inference = ed.MetropolisHastings({N: qN}, {N: proposal_qN}, data={y_obs: y})
inference.run()
# CRITICISM
sess = ed.get_session()
# Pull the full chain of posterior samples and discard the first 1,000 as burn-in.
samples = sess.run(qN.params)
estN = samples[1000:]
print("Inferred posterior mean:")
print(np.mean(estN))
print("Inferred posterior stddev:")
print(np.std(estN))
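# A rough 95% credible interval from the retained samples, computed with
# numpy's percentile function as a quick extra check.
ci_low, ci_high = np.percentile(estN, [2.5, 97.5])
print("95% credible interval: [{:.1f}, {:.1f}]".format(ci_low, ci_high))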