@murayama333
Created June 7, 2017 08:33
import tensorflow as tf

# Model parameters (trainable).
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)

# Model input, linear model, and target output.
x = tf.placeholder(tf.float32)
model = W * x + b
y = tf.placeholder(tf.float32)

# Loss: sum of squared errors.
loss = tf.reduce_sum(tf.square(model - y))

# Optimizer: plain gradient descent with a learning rate of 0.01.
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# Training data.
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]

# Initialize variables and run the training loop.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})

# Report the learned parameters and the final loss.
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))