Skip to content

Instantly share code, notes, and snippets.

@aliostad
Created December 2, 2018 19:32
Show Gist options
  • Save aliostad/96f551ca9fe418046272c7fe8d6e5a81 to your computer and use it in GitHub Desktop.
Solving a simple linear function (Y = aX + b) using TensorFlow
import keras
import tensorflow as tf
import keras.backend as K
import numpy as np
from tensorflow.contrib.layers import *
# What we want to do: fit aX + b = Y  (ground truth: a=3, b=5)
#
# Alternative version that bakes the training data directly into the graph
# as NumPy constants instead of feeding it through placeholders.  Kept as
# real comments (not a dangling string literal) for reference:
#
#   X = np.random.uniform(-100., 100., 1000)
#   Y = np.array([x * 3. + 5. for x in X])
#   a = tf.Variable(2.0, name='a')
#   b = tf.Variable(4.0, name='b')
#   y_pred = a * X + b
#   loss = tf.reduce_mean(tf.square(Y - y_pred))
#   optimizer = tf.train.AdamOptimizer(0.01)
#   train = optimizer.minimize(loss)
#   init = tf.global_variables_initializer()
#   sess = tf.Session()
#   sess.run(init)
#   for i in range(1000):
#       print(sess.run([train, a, b]))
# Fit Y = a*X + b by gradient descent, feeding the data in via placeholders.
X = np.random.uniform(-100., 100., 1000)
Y = X * 3. + 5.  # vectorized; ground truth is a=3, b=5

# Trainable parameters, both deliberately initialized away from the truth.
a = tf.Variable(0.0, name='a')
b = tf.Variable(0.0, name='b')
# Feed points for the training data (shape inferred from the fed arrays).
X_p = tf.placeholder(dtype=tf.float32, name='X_p')
Y_p = tf.placeholder(dtype=tf.float32, name='Y_p')

ax = tf.multiply(a, X_p)
y_pred = ax + b
# Mean squared error between targets and predictions.
loss = tf.reduce_mean(tf.square(Y_p - y_pred))
optimizer = tf.train.AdamOptimizer(0.1)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
# Context manager guarantees the session is closed even if a run step fails.
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        # One Adam step per iteration; print the current estimates of a, b.
        # print(...) is valid in both Python 2 and Python 3.
        print(sess.run([train, a, b], {X_p: X, Y_p: Y}))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment