@vinupriyesh
Created November 1, 2017 11:49
Very simple logistic regression using a neural network to perform OR, AND, XOR
"""
Performing OR, AND, XOR operation using logistic regression which is a very simple neural network without any hidden layer
This model cannot predict XOR just like any other logistic regression as the XOR cannot be seperated by a straight line
@Author : Vinu Priyesh V.A.
"""
import numpy as np
#Compute function for OR, AND, XOR; used to generate labels for the training/test sets and to validate our results
def compute(x,m,label):
    if(label == "XOR"):
        return np.logical_xor(x[0,:],x[1,:]).reshape(1,m).astype(int)
    if(label == "AND"):
        return np.logical_and(x[0,:],x[1,:]).reshape(1,m).astype(int)
    if(label == "OR"):
        return np.logical_or(x[0,:],x[1,:]).reshape(1,m).astype(int)
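#Illustrative sketch (not part of the original gist): enumerating all four
#input combinations prints the truth tables these labels encode. Note that
#XOR's positive examples (0,1) and (1,0) sit on opposite corners of the unit
#square, which is why no single straight line can separate them.
def print_truth_tables():
    X_demo = np.array([[0, 0, 1, 1],
                       [0, 1, 0, 1]])
    for op in ("OR", "AND", "XOR"):
        print(op, compute(X_demo, 4, op))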
#Validation function for OR, AND, XOR; returns the percentage of predictions that match the true labels
def validate(x,y,m,label):
    y1 = compute(x,m,label)
    return np.sum(y1==y)/m*100
#Simple sigmoid; ReLU is usually preferred for hidden layers, but sigmoid is the right choice here because the output must be a probability
def sigmoid(z):
    s = 1 / (1 + np.exp(-z))
    return s
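#Optional alternative (an addition, not from the original gist): for large
#negative z, np.exp(-z) can overflow; this drop-in variant splits the
#computation by sign to stay numerically stable. The inputs in this gist are
#tiny, so the plain version above is fine in practice.
def sigmoid_stable(z):
    out = np.empty_like(z, dtype=float)
    pos = z >= 0
    out[pos] = 1 / (1 + np.exp(-z[pos]))
    ez = np.exp(z[~pos])
    out[~pos] = ez / (1 + ez)
    return out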
#Back prop to get the weights and bias, computed using gradient descent
def back_prop(m,w,b,X,Y,iterations,learning_rate):
    for i in range(iterations):
        A = sigmoid(np.dot(w.T, X) + b)
        #Cost is omitted here, but it is good to track it to visualize convergence
        #cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A)))
        dw = (1 / m) * np.dot(X, (A - Y).T)
        db = (1 / m) * np.sum(A - Y)
        w = w - learning_rate * dw
        b = b - learning_rate * db
    return w,b
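#Sanity check (illustrative, not in the original gist): the closed-form
#gradients above (dw = X.(A-Y)^T / m, db = mean(A-Y)) follow from the
#cross-entropy cost; a finite-difference check on w confirms them
#numerically. The returned value should be tiny (on the order of 1e-9).
def gradient_check(m,w,b,X,Y,eps=1e-7):
    cost = lambda wv: (-1 / m) * np.sum(
        Y * np.log(sigmoid(np.dot(wv.T, X) + b))
        + (1 - Y) * np.log(1 - sigmoid(np.dot(wv.T, X) + b)))
    A = sigmoid(np.dot(w.T, X) + b)
    dw = (1 / m) * np.dot(X, (A - Y).T)
    dw_num = np.zeros_like(w)
    for j in range(w.shape[0]):
        w_plus = w.copy();  w_plus[j, 0] += eps
        w_minus = w.copy(); w_minus[j, 0] -= eps
        dw_num[j, 0] = (cost(w_plus) - cost(w_minus)) / (2 * eps)
    return np.max(np.abs(dw - dw_num))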
#Forward prop to get the predictions: 1 if the sigmoid output exceeds 0.5, else 0
def forward_prop(m,w,b,X):
    Y = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    A = sigmoid(np.dot(w.T,X) + b)
    for i in range(m):
        Y[0, i] = 1 if A[0, i] > 0.5 else 0
    return Y
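#Equivalent vectorized form (an optional rewrite, not from the original gist):
#the per-example loop above can be replaced by a single boolean comparison.
def forward_prop_vectorized(m,w,b,X):
    w = w.reshape(X.shape[0], 1)
    A = sigmoid(np.dot(w.T, X) + b)
    return (A > 0.5).astype(float)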
def model(m,iterations,learning_rate,label):
    print("\nmodel : {}".format(label))
    w = np.random.randn(2,1)
    b = 0
    #Training phase
    X_train = np.random.randint(2,size=(2,m))
    Y_train = compute(X_train,m,label)
    w,b = back_prop(m,w,b,X_train,Y_train,iterations,learning_rate)
    Y1 = forward_prop(m,w,b,X_train)
    P_train = validate(X_train,Y1,m,label)
    #Testing phase: a fresh, twice-as-large random set
    m*=2
    X_test = np.random.randint(2,size=(2,m))
    Y1 = forward_prop(m,w,b,X_test)
    P_test = validate(X_test,Y1,m,label)
    print("Training accuracy : {}%\nTesting accuracy : {}%".format(P_train,P_test))
    return P_train,P_test
m=1000
iterations = 1000
learning_rate = 0.2
model(m,iterations,learning_rate,"OR")
model(m,iterations,learning_rate,"AND")
model(m,iterations,learning_rate,"XOR")
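#Expected behaviour (approximate; results vary with the random initialization
#and sampled data): OR and AND converge to about 100% accuracy, while XOR
#stays near chance level (about 50%), since no straight line separates its
#classes. Even the best possible linear classifier tops out at 75% on XOR,
#by getting three of the four input patterns right.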