Fitting a simple linear function (a*X + b = Y) with TensorFlow
import tensorflow as tf
import numpy as np

# What we want to do: fit a and b so that a*X + b = Y
''' This version works without feed_dict: X and Y are baked into the graph as constants.
X = np.random.uniform(-100., 100., 1000)
Y = np.array([x*3. + 5. for x in X])            # ground truth: a=3, b=5
a = tf.Variable(2.0, name='a')
b = tf.Variable(4.0, name='b')
y_pred = a * X + b
loss = tf.reduce_mean(tf.square(Y - y_pred))    # mean squared error
optimizer = tf.train.AdamOptimizer(0.01)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    print(sess.run([train, a, b]))
'''
# Same model, but X and Y are now fed through placeholders at run time.
X = np.random.uniform(-100., 100., 1000)
Y = np.array([x*3. + 5. for x in X])            # ground truth: a=3, b=5
a = tf.Variable(0.0, name='a')
b = tf.Variable(0.0, name='b')
X_p = tf.placeholder(dtype=tf.float32, name='X_p')
Y_p = tf.placeholder(dtype=tf.float32, name='Y_p')
ax = tf.multiply(a, X_p)
y_pred = ax + b
loss = tf.reduce_mean(tf.square(Y_p - y_pred))  # mean squared error
optimizer = tf.train.AdamOptimizer(0.1)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    print(sess.run([train, a, b], {X_p: X, Y_p: Y}))
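
# A minimal sketch of the same fit, assuming TensorFlow >= 2.0, where tf.Session,
# placeholders and tf.train.AdamOptimizer no longer exist. It is kept inside a
# string, like the block above, so the TF 1.x script above still runs unchanged.
'''
a2 = tf.Variable(0.0, name='a2')
b2 = tf.Variable(0.0, name='b2')
opt = tf.keras.optimizers.Adam(0.1)
X_t = tf.constant(X, dtype=tf.float32)
Y_t = tf.constant(Y, dtype=tf.float32)
for i in range(1000):
    with tf.GradientTape() as tape:
        y_hat = a2 * X_t + b2                    # forward pass: a*X + b
        mse = tf.reduce_mean(tf.square(Y_t - y_hat))
    grads = tape.gradient(mse, [a2, b2])
    opt.apply_gradients(zip(grads, [a2, b2]))
    print(a2.numpy(), b2.numpy())                # should approach a=3, b=5
'''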