@qbx2
Created March 19, 2017 05:18
import tensorflow as tf
import random

# Trainable logits for a 4-way categorical distribution (batch of 1).
W = tf.get_variable('W', shape=[1, 4])
ph_y = tf.placeholder(tf.int32, [1])  # labels
prob = tf.nn.softmax(W)  # current model distribution over the 4 classes

# Mean cross-entropy between the model distribution softmax(W) and the labels.
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=W, labels=ph_y
    )
)
# minimize = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
minimize = tf.train.AdamOptimizer(0.01).minimize(loss)

def get_train_data(n=1):
    # Sample n labels from the target distribution:
    # P(0) = 0.60, P(1) = 0.10, P(2) = 0.15, P(3) = 0.15.
    ret = []
    for _ in range(n):
        r = random.random()
        if r < .60:
            ret.append(0)
        elif r < .70:
            ret.append(1)
        elif r < .85:
            ret.append(2)
        else:
            ret.append(3)
    return ret

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# Train on one sampled label at a time; softmax(W) should converge to the
# sampling distribution above.
for i in range(100000):
    ys = get_train_data(1)
    ret = sess.run([loss, minimize], feed_dict={ph_y: ys})
    if i % 1000 == 0:
        print(i, ret, sess.run(prob))
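
# For reference, the minimum achievable loss is the entropy of the sampling
# distribution, reached when softmax(W) matches it exactly. A minimal sketch
# (plain Python, not part of the original gist) computes that target value
# for comparison against the printed loss:
import math

target = [0.60, 0.10, 0.15, 0.15]  # distribution implied by get_train_data
entropy = -sum(p * math.log(p) for p in target)
print('expected converged loss (nats):', entropy)  # ~1.106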