@tomonari-masada, created May 18, 2016
Approximating the normal distribution with a GAN
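The script below trains a small GAN: the generator G maps 2-dimensional uniform noise to scalar samples, and the discriminator D learns to separate those samples from draws of N(mu, sigma^2) with mu = 5.0 and sigma = 0.2. Both networks share the MLP architecture defined below. The two losses, obj_d and obj_g, are minibatch estimates built from the standard GAN minimax value function

    \min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\mathrm{data}}}[\log D(x)] + \mathbb{E}_{z \sim p_z}[\log(1 - D(G(z)))]

where the discriminator ascends V (implemented as minimizing -obj_d) and the generator descends the second term, the original, saturating form of the generator loss.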
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Target distribution: N(mu, sigma^2). The generator draws from a
# 2-dimensional uniform latent prior.
mu, sigma = 5.0, 0.2
latent_dim = 2
def mlp(inp):
    """Four-hidden-layer MLP shared by the generator and the discriminator."""
    w1 = tf.get_variable("w1", [inp.get_shape()[1], 8], initializer=tf.random_normal_initializer())
    b1 = tf.get_variable("b1", [8], initializer=tf.constant_initializer(0.1))
    w2 = tf.get_variable("w2", [8, 8], initializer=tf.random_normal_initializer())
    b2 = tf.get_variable("b2", [8], initializer=tf.constant_initializer(0.1))
    w3 = tf.get_variable("w3", [8, 8], initializer=tf.random_normal_initializer())
    b3 = tf.get_variable("b3", [8], initializer=tf.constant_initializer(0.1))
    w4 = tf.get_variable("w4", [8, 8], initializer=tf.random_normal_initializer())
    b4 = tf.get_variable("b4", [8], initializer=tf.constant_initializer(0.0))
    w_ = tf.get_variable("w_", [8, 1], initializer=tf.random_normal_initializer())
    b_ = tf.get_variable("b_", [1], initializer=tf.constant_initializer(0.0))
    # Three ReLU layers with dropout (keep_prob=0.5, always active),
    # one tanh layer, then a linear output.
    fc1 = tf.nn.dropout(tf.nn.relu(tf.matmul(inp, w1) + b1), 0.5)
    fc2 = tf.nn.dropout(tf.nn.relu(tf.matmul(fc1, w2) + b2), 0.5)
    fc3 = tf.nn.dropout(tf.nn.relu(tf.matmul(fc2, w3) + b3), 0.5)
    fc4 = tf.nn.tanh(tf.matmul(fc3, w4) + b4)
    fc_ = tf.matmul(fc4, w_) + b_
    out = fc_                  # raw output, used as the generator's sample
    out_s = tf.sigmoid(fc_)    # sigmoid output, used as the discriminator's probability
    return out, out_s, [w1, b1, w2, b2, w3, b3, w4, b4, w_, b_]
def optimizer(loss, var_list):
    return tf.train.AdamOptimizer().minimize(loss, var_list=var_list)
with tf.variable_scope("G"):
z_node=tf.placeholder(tf.float32, [None,latent_dim])
G,_,theta_g=mlp(z_node)
with tf.variable_scope("D") as scope:
x_node=tf.placeholder(tf.float32, [None,1])
_,D1,theta_d=mlp(x_node)
scope.reuse_variables()
_,D2,theta_d=mlp(G)
obj_d=tf.reduce_mean(tf.log(1e-10+D1)+tf.log(1e-10+1-D2))
obj_g=tf.reduce_mean(tf.log(1e-10+1-D2))
opt_d=optimizer(-obj_d,theta_d)
opt_g=optimizer(obj_g,theta_g)
sess = tf.Session()
# The original gist used tf.initialize_all_variables(), since deprecated:
sess.run(tf.global_variables_initializer())

TRAIN_ITERS = 10000
BATCH_SIZE = 50
histd, histg = np.zeros(TRAIN_ITERS), np.zeros(TRAIN_ITERS)
for i in range(TRAIN_ITERS):
    # One discriminator step per iteration (raise range(1) for k steps).
    for j in range(1):
        x = np.random.normal(mu, sigma, BATCH_SIZE)    # real samples
        z = np.random.random(BATCH_SIZE * latent_dim)  # uniform noise in [0, 1)
        histd[i], _ = sess.run([obj_d, opt_d],
                               {x_node: np.reshape(x, (BATCH_SIZE, 1)),
                                z_node: np.reshape(z, (BATCH_SIZE, latent_dim))})
    # One generator step on fresh noise.
    z = np.random.random(BATCH_SIZE * latent_dim)
    histg[i], _ = sess.run([obj_g, opt_g], {z_node: np.reshape(z, (BATCH_SIZE, latent_dim))})
    if i % 100 == 0:
        print(i, histd[i], histg[i])
# Draw 10000 samples from the trained generator and plot a normalized
# histogram (the original normed=True keyword was removed from matplotlib;
# density=True is its replacement).
z = np.random.random(10000 * latent_dim)
x = sess.run(G, {z_node: np.reshape(z, (10000, latent_dim))})
plt.hist(x, bins=50, density=True)
plt.show()
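The code above targets the 2016-era TensorFlow 1.x graph API (tf.get_variable, tf.placeholder, tf.Session). A minimal sketch of a compatibility shim, assuming TensorFlow 2.x is installed, is to replace the "import tensorflow as tf" line at the top with the v1 compatibility namespace:

# Hypothetical shim for running this script under TensorFlow 2.x:
# the compat.v1 namespace restores placeholders, variable scopes,
# Session, and the keep_prob signature of tf.nn.dropout used above.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # turn off eager execution and other v2 defaults

With that substitution the rest of the script should run unchanged, though the graph-mode calls will emit deprecation warnings.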