TensorFlow GradientDescent
import random
import tensorflow as tf # TensorFlow 1.x graph API (tf.Session, tf.placeholder)
# tf.constant
sess = tf.Session()
node1 = tf.constant(3.0, dtype=tf.float32) # constant node
node2 = tf.constant(4.0)
node3 = tf.add(node1, node2) # addition node
result = sess.run(node1)
print('tf.constant node: %s' % result)
result = sess.run(node2)
print('tf.constant node: %s' % result)
result = sess.run(node3)
print('tf.add node: %s' % result)
# tf.placeholder
a = tf.placeholder(tf.float32) # placeholder: value will be fed in later
b = tf.placeholder(tf.float32)
adder_node = a + b # == tf.add(a, b)
result = sess.run(adder_node, {a:3, b:4.5})
print('tf.placeholder tf.add: %s' % result)
result = sess.run(adder_node, {a:5, b:5})
print('tf.placeholder tf.add: %s' % result)
result = sess.run(adder_node, {a:[1,2], b:[3,4]})
print('tf.placeholder tf.add: %s' % result)
add_and_triple_node = adder_node * 3 # == tf.multiply(adder_node, 3)
result = sess.run(add_and_triple_node, {a:3, b:4})
print('tf.placeholder tf.multiply: %s' % result)
# tf.Variable
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32) # note: rebinds b, which was a placeholder above
x = tf.placeholder(tf.float32)
linear_model = W*x + b
init = tf.global_variables_initializer() # a tf.Variable is not initialized when the instance is created
sess.run(init)
result = sess.run(linear_model, {x: [1, 2, 3, 4]})
print('tf.Variable W*x+b: %s' % result)
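# Expected output: 0.3*x - 0.3 for each x, i.e. roughly
# [0.0, 0.3, 0.6, 0.9] (up to float32 rounding).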
# loss func(node)
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
linear_model = W*x + b
squared_deltas = tf.square(linear_model - y) # squared errors
loss = tf.reduce_sum(squared_deltas) # sum of squared errors => the value to minimize
init = tf.global_variables_initializer()
sess.run(init) # initialize variables
result = sess.run(loss, {x: [1,2,3,4], y: [0, -1, -2, -3]})
print('loss: %s' % result)
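# Hand check: with W=0.3 and b=-0.3 the model predicts
# [0.0, 0.3, 0.6, 0.9]; the deltas from y are [0.0, 1.3, 2.6, 3.9],
# their squares [0.0, 1.69, 6.76, 15.21], and the sum is 23.66.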
# tf.assign (node that reassigns a variable's value when run)
assign_w_node = tf.assign(W, [-1.])
assign_b_node = tf.assign(b, [1.])
sess.run([assign_w_node, assign_b_node])
result = sess.run(loss, {x: [1,2,3,4], y: [0, -1, -2, -3]})
print('loss: %s' % result) # exactly 0.0: y = -1*x + 1 reproduces the targets perfectly
# tf.train
W = tf.Variable([random.random() * 1000000], dtype=tf.float32) # start far from the optimum
b = tf.Variable([random.random() * 1000000], dtype=tf.float32)
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
linear_model = W*x + b
squared_deltas = tf.square(linear_model - y) # squared errors
loss = tf.reduce_sum(squared_deltas) # sum of squared errors => the value to minimize
init = tf.global_variables_initializer()
sess.run(init) # initialize variables
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) # gradient descent training algorithm
train = optimizer.minimize(loss)
print('before train: %s' % sess.run([W, b]))
for i in range(2500): # train for 2,500 steps
    sess.run(train, {x: [1,2,3,4], y: [0,-1,-2,-3]}) # one training step
    print('[%d] W:%s b:%s' % (i, sess.run(W), sess.run(b))) # current state
print('after train: %s' % sess.run([W, b]))
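# A minimal sketch of what optimizer.minimize(loss) amounts to here:
# compute d(loss)/dW and d(loss)/db with tf.gradients, then apply
# var <- var - learning_rate * gradient via tf.assign. The names grad_w,
# grad_b, and manual_step are illustrative, not part of the TF API.
grad_w, grad_b = tf.gradients(loss, [W, b])
manual_step = tf.group(
    tf.assign(W, W - 0.01 * grad_w),
    tf.assign(b, b - 0.01 * grad_b))
sess.run(init) # re-initialize W and b to fresh random values
for i in range(2500):
    sess.run(manual_step, {x: [1,2,3,4], y: [0,-1,-2,-3]}) # one manual step
print('after manual train: %s' % sess.run([W, b]))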