import tensorflow as tf  # TensorFlow 1.x API

# reset_graph() is assumed to be defined earlier in the notebook; a common
# definition clears the default graph and fixes the seed for reproducibility:
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)

reset_graph()

n_epochs = 1000
learning_rate = 0.01
epsilon = 1e-7  # small constant that keeps log() away from zero

# `inputs` (m x n feature matrix), `output` (m x 1 label vector) and `n`
# (number of features) are assumed to be defined earlier.
X = tf.constant(inputs, dtype=tf.float32, name="x")
y = tf.constant(output, dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n, 1], -1.0, 1.0), name="theta")
logits = tf.matmul(X, theta, name="logits")

# predictions = 1 / (1 + tf.exp(-logits))  # manual sigmoid, equivalent
predictions = tf.sigmoid(logits)

# One can write the cross-entropy cost function by hand:
loss = -tf.reduce_mean(y * tf.log(predictions + epsilon)
                       + (1 - y) * tf.log(1 - predictions + epsilon))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for epoch in range(n_epochs):
        sess.run(training_op)
        if epoch % 100 == 0:
            print("Epoch", epoch, "Loss =", loss.eval())
    best_theta = theta.eval()

print(best_theta)
#[[-0.27450362]
# [ 1.1188453 ]
# [-1.4013102 ]]
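
# --- Notes beyond the original gist (assumptions, for illustration) ---

# The hand-coded cross-entropy above matches TensorFlow 1.x's built-in
# tf.losses.log_loss (the same epsilon-clipped log loss on the predictions);
# tf.nn.sigmoid_cross_entropy_with_logits works on the raw logits instead,
# which is more numerically stable. A minimal sketch of both:
loss_builtin = tf.losses.log_loss(labels=y, predictions=predictions,
                                  epsilon=epsilon)
loss_from_logits = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))

# To classify new points with the learned parameters, apply the sigmoid to
# X_new.dot(best_theta) and threshold at 0.5 (X_new is a hypothetical m x n
# feature matrix, not part of the gist):
# import numpy as np
# probabilities = 1 / (1 + np.exp(-X_new.dot(best_theta)))
# y_pred = (probabilities >= 0.5).astype(int)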