# Logistic regression on 1-D synthetic data (TensorFlow 1.x compat mode)
# Gist by @sahasourav17, created January 7, 2022
# Import the needed libraries; run TensorFlow in v1 compatibility mode
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import matplotlib.pyplot as plt
# Set the hyperparameters
learning_rate = 0.001
training_epochs = 1000
# NumPy sigmoid helper, used later to plot the learned curve
def sigmoid(x):
    return 1. / (1. + np.exp(-x))
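# Quick sanity check on the helper (illustrative addition, not part of the
# original gist): the sigmoid is exactly 0.5 at zero and saturates toward
# 0 and 1 for large negative/positive inputs
assert sigmoid(0.) == 0.5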
# Generate fake data: two Gaussian clusters, labeled 0. and 1.
x1 = np.random.normal(-4, 2, 500)
x2 = np.random.normal(4, 2, 500)
xs = np.append(x1, x2)
ys = np.asarray([0.] * len(x1) + [1.] * len(x2))
# Visualize the generated data
plt.figure(figsize=(16, 12))
plt.scatter(xs, ys)
# Define the input/output placeholders
X = tf.placeholder(tf.float32, shape=(None,), name="x")
Y = tf.placeholder(tf.float32, shape=(None,), name="y")
# Define the parameter node: w[0] is the bias, w[1] is the weight
w = tf.Variable([0., 0.], name="parameter", trainable=True)
# Define the model using TensorFlow's sigmoid function
y_model = tf.sigmoid(w[1] * X + w[0])
# Cross-entropy loss function
cost = tf.reduce_mean(-Y * tf.log(y_model) - (1 - Y) * tf.log(1 - y_model))
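# The line above implements binary cross-entropy:
#   L = -(1/N) * sum_i [ y_i * log(p_i) + (1 - y_i) * log(1 - p_i) ]
# Note that tf.log(0) yields -inf; a common optional safeguard (an
# assumption added here, not part of the original gist) is to clip the
# prediction before taking the log:
#   eps = 1e-7
#   y_clipped = tf.clip_by_value(y_model, eps, 1. - eps)
#   cost = tf.reduce_mean(-Y * tf.log(y_clipped) - (1 - Y) * tf.log(1 - y_clipped))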
# Define the minimizer to use
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
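# Other first-order optimizers drop in the same way, e.g. (an alternative
# sketch, not used by the original gist):
#   train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)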
# Open a session and update the variables
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Track the previous error to test for convergence
    prev_err = 0
    # Iterate until the maximum number of epochs is reached or convergence
    for epoch in range(training_epochs):
        err, _ = sess.run([cost, train_op], {X: xs, Y: ys})
        print(epoch, err)
        # Check for convergence
        if abs(prev_err - err) < 0.0001:
            break
        # Update the previous error value
        prev_err = err
    w_val = sess.run(w, {X: xs, Y: ys})
# Plot the learned sigmoid function
all_xs = np.linspace(-10, 10, 100)
plt.plot(all_xs, sigmoid(all_xs * w_val[1] + w_val[0]))
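# The model predicts 0.5 where w[1]*x + w[0] = 0; marking that decision
# boundary is an optional illustration, not part of the original gist
plt.axvline(-w_val[0] / w_val[1], linestyle="--")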
plt.show()
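# For reference, a rough equivalent in idiomatic TF2/Keras (a sketch under
# the assumption of a fresh script with `import tensorflow as tf` and
# TensorFlow 2.x installed; left commented out here because this script
# disables v2 behavior above):
#   model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="sigmoid")])
#   model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate),
#                 loss="binary_crossentropy")
#   model.fit(xs.reshape(-1, 1), ys, epochs=100, verbose=0)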