# Logistic regression: vanilla NumPy implementation
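# Fits a one-feature logistic regression by per-example stochastic gradient
# descent on a tiny hand-made dataset, then plots the data, the training
# loss, and the fitted probabilities.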
import numpy as np
import matplotlib.pyplot as plt

# Toy 1-D dataset: small inputs labeled 0, larger inputs labeled 1.
inp = np.array([0, 1, 2, 2, 23, 35, 32, 52])
target = np.array([0, 0, 0, 0, 1, 1, 1, 1])
inp = inp.reshape(8, 1)
target = target.reshape(8, 1)
params = 1  # one input feature, so one weight and one bias
class Model:
    def __init__(self, dim_inp, lr):
        # Random initial weight and bias, one per input dimension.
        self.m = np.random.random_sample((dim_inp,))
        self.b = np.random.random_sample((dim_inp,))
        self.lr = lr

    def forward(self, x):
        # p(y=1 | x) = sigmoid(m.T x + b)
        return sigmoid(self.b + self.m.T @ x)

    def update(self, dX, X):
        # SGD step on the weight and bias.
        self.m -= self.lr * (X @ dX)
        self.b -= self.lr * np.mean(dX, axis=0, keepdims=True)

    def predict(self, x):
        return self.forward(x)
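# Shape note: with params = 1, self.m and self.b are length-1 arrays, and
# forward() takes a single example x of shape (1,), so each prediction is a
# length-1 array as well.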
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def cost(y_pred, y_train):
    # Binary cross-entropy. Clamp the probabilities away from 0 and 1 so
    # neither np.log() call is handed an exact zero.
    y_pred[y_pred <= 0] = 1e-7
    y_pred_neg = 1 - y_pred
    y_pred_neg[y_pred_neg <= 0] = 1e-7
    loss = -np.mean(y_train * np.log(y_pred) + (1 - y_train) * np.log(y_pred_neg))
    return loss
def dcost(y_pred, y_train):
    # Gradient of the cross-entropy with respect to the logit: the sigmoid
    # derivative p(1 - p) cancels, leaving simply (p - y).
    return y_pred - y_train
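# Sanity check (a quick sketch, not part of the original gist): the analytic
# gradient from dcost() should match a centered finite difference of cost()
# taken with respect to the logit z, confirming that d/dz sigmoid(z)
# cancels as claimed above.
_z, _y, _eps = np.array([0.5]), np.array([1.0]), 1e-5
_numeric = (cost(sigmoid(_z + _eps), _y) - cost(sigmoid(_z - _eps), _y)) / (2 * _eps)
_analytic = dcost(sigmoid(_z), _y)
assert np.allclose(_numeric, _analytic, atol=1e-6)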
def train(model, steps, epochs):
    losses = []
    for j in range(epochs):
        for i in range(steps):  # one SGD step per training example
            x, y = inp[i], target[i]
            p = model.forward(x)
            loss = cost(p, y)
            model.update(dcost(p, y), x)
            losses.append(loss)
    # After training, collect a prediction for every input.
    out = []
    for i in range(steps):
        p = model.predict(inp[i])
        out.append(p)
    return np.array(out).reshape(steps, 1), losses
# Plot the raw data.
plt.scatter(inp, target)
plt.show()

steps = 8
epochs = 20
lr = 0.1
model = Model(params, lr)
predictions, losses = train(model, steps, epochs)

# Training loss at each SGD step across all epochs.
plt.scatter(range(steps * epochs), losses)
plt.show()

# Fitted probabilities for each input.
plt.scatter(inp, predictions)
plt.show()
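# Extra readout (an addition, not in the original gist): the model outputs
# p = 0.5 where m x + b = 0, so the fitted decision boundary sits at
# x = -b / m.
print("decision boundary at x =", -model.b / model.m)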