# HETEROSCEDASTIC MIXTURE DENSITY NETWORK IN TENSORFLOW (gist by @sjchoi86, last active March 21, 2017)
import math
import numpy as np
import tensorflow as tf

NHIDDEN = 50       # HIDDEN LAYER SIZE
STDEV = 0.1        # STDDEV FOR WEIGHT INITIALIZATION
KMIX = 20          # NUMBER OF MIXTURES
NOUT = KMIX * 3    # PI / SIGMA / MU PER MIXTURE
x = tf.placeholder(dtype=tf.float32, shape=[None, 1], name="x")
y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name="y")
Wmdn = {
    "l1":     tf.Variable(tf.random_normal([1, NHIDDEN], stddev=STDEV, dtype=tf.float32)),
    "l2_mds": tf.Variable(tf.random_normal([NHIDDEN, NOUT], stddev=STDEV, dtype=tf.float32)),
    "l2_err": tf.Variable(tf.random_normal([NHIDDEN, 1], stddev=STDEV, dtype=tf.float32))
}
bmdn = {
    "l1":     tf.Variable(tf.zeros([1, NHIDDEN], dtype=tf.float32)),
    "l2_mds": tf.Variable(tf.random_uniform([1, NOUT], minval=-10, maxval=10, dtype=tf.float32)),
    "l2_err": tf.Variable(tf.zeros([1, 1], dtype=tf.float32))
}
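# ARCHITECTURE NOTE (added): 1 -> NHIDDEN (tanh) -> two linear heads; "l2_mds"
# emits the KMIX * 3 mixture parameters and "l2_err" one input-dependent noise sigma.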
def hmdn(_x, _W, _b):
    sig_gain = 1
    # SHARED HIDDEN LAYER
    _l1 = tf.nn.tanh(tf.matmul(_x, _W['l1']) + _b['l1'])
    # MIXTURE-DENSITY HEAD AND NOISE HEAD
    _out_mds = tf.matmul(_l1, _W['l2_mds']) + _b['l2_mds']
    _out_err_sigma_hat = tf.matmul(_l1, _W['l2_err']) + _b['l2_err']
    # CONVERT OUTPUT (PI / SIGMA / MU)
    _out_pi_hat, _out_sigma_hat, _out_mu = tf.split(_out_mds, 3, 1)
    # NUMERICALLY STABLE SOFTMAX OVER THE MIXTURE WEIGHTS
    _out_pi_hat = tf.exp(_out_pi_hat - tf.reduce_max(_out_pi_hat, 1, keep_dims=True))
    _nor_pi = tf.reciprocal(tf.reduce_sum(_out_pi_hat, 1, keep_dims=True))
    _out_pi = tf.multiply(_nor_pi, _out_pi_hat)
    # BOUNDED POSITIVE SIGMA VIA SIGMOID
    _out_sigma = sig_gain * tf.sigmoid(_out_sigma_hat)
    # CONVERT NOISE MODEL (SIGMA)
    _out_err_sigma = sig_gain * tf.sigmoid(_out_err_sigma_hat)
    return _out_pi, _out_sigma, _out_mu, _out_err_sigma
out_pi, out_sigma, out_mu, out_err_sigma = hmdn(x, Wmdn, bmdn)
print("HETEROSCEDASTIC MIXTURE DENSITY NETWORK READY")
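# ADDED SANITY CHECK (a sketch, not part of the original gist): each mixture
# head should emit KMIX values per example and the noise head a single sigma.
print(out_pi.get_shape())         # (?, 20)
print(out_sigma.get_shape())      # (?, 20)
print(out_mu.get_shape())         # (?, 20)
print(out_err_sigma.get_shape())  # (?, 1)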
pi = math.pi
# UNIVARIATE GAUSSIAN MODEL
def tf_normal(_y, _mu, _sigma):
    _result = (_y - _mu) / _sigma
    _result = -tf.square(_result) / 2
    _result = tf.exp(_result) / (math.sqrt(2 * pi) * _sigma)
    return _result
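# NOTE (added): tf_normal evaluates the univariate Gaussian density
#   N(y | mu, sigma) = exp(-(y - mu)^2 / (2 * sigma^2)) / (sqrt(2 * pi) * sigma)
# elementwise, broadcasting y of shape [N, 1] against the [N, KMIX] mu and sigma.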
# HETEROSCEDASTIC GAUSSIAN MIXTURE MODEL
def hgmm(_y, _out_pi, _out_sigma, _out_mu, _out_err_sig):
    # PER-COMPONENT LIKELIHOODS, EACH WIDENED BY THE LEARNED NOISE SIGMA
    _probs = tf_normal(_y, _out_mu, _out_sigma + _out_err_sig)
    # MIXTURE LIKELIHOOD, THEN MEAN NEGATIVE LOG-LIKELIHOOD
    _result = tf.multiply(_out_pi, _probs)
    _result = tf.reduce_sum(_result, 1, keep_dims=True)
    return tf.reduce_mean(-tf.log(_result))
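# NOTE (added): the value returned above is the mean negative log-likelihood
#   L = -(1/N) * sum_n log( sum_k pi_k(x_n) * N(y_n | mu_k(x_n), sigma_k(x_n) + sigma_err(x_n)) ),
# i.e. a Gaussian mixture whose every component is widened by the learned
# input-dependent noise level sigma_err, which is what makes it heteroscedastic.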
# SET LOSS AND OPTIMIZER
loss = hgmm(y, out_pi, out_sigma, out_mu, out_err_sigma)
optm = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999,
                              epsilon=0.1).minimize(loss)
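# ADDED TRAINING-LOOP SKETCH: the synthetic 1-D data, batch size, and iteration
# count below are illustrative assumptions, not part of the original gist.
n_data = 1000
x_data = np.random.uniform(-1., 1., size=(n_data, 1)).astype(np.float32)
# HETEROSCEDASTIC TARGET: NOISE SPREAD GROWS WITH |x|
y_data = (np.sin(3. * x_data) + np.abs(x_data) * np.random.randn(n_data, 1)).astype(np.float32)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for it in range(2000):
    idx = np.random.choice(n_data, 64)
    _, l = sess.run([optm, loss], feed_dict={x: x_data[idx], y: y_data[idx]})
    if it % 500 == 0:
        print("iter %4d, loss %.4f" % (it, l))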