
@mohitd
Created December 6, 2023 05:28
Artificial Neuron Implementation
import matplotlib.pyplot as plt
from sklearn import datasets
import numpy as np

# fix the seed for determinism
np.random.seed(42)


def cost(pred, true):
    return 0.5 * (true - pred) ** 2


def dcost(pred, true):
    return -(true - pred)


def sigmoid(z):
    return 1. / (1 + np.exp(-z))


def dsigmoid(z):
    return sigmoid(z) * (1 - sigmoid(z))
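

# Illustrative sanity check (an added sketch, not required for training): the
# analytic sigmoid derivative should agree with a central finite difference.
_eps = 1e-6
assert np.allclose(dsigmoid(0.3),
                   (sigmoid(0.3 + _eps) - sigmoid(0.3 - _eps)) / (2 * _eps),
                   atol=1e-6)
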
class ArtificialNeuron:
    def __init__(self, input_size, learning_rate=0.5, num_epochs=50, minibatch_size=32):
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.minibatch_size = minibatch_size
        self.W_ = np.zeros(input_size)
        self.b_ = 0

    def train(self, X, y):
        self.costs_ = []
        for _ in range(self.num_epochs):
            epoch_cost = 0
            # shuffle data each epoch
            permute_idxes = np.random.permutation(X.shape[0])
            X = X[permute_idxes]
            y = y[permute_idxes]
            for start in range(0, X.shape[0], self.minibatch_size):
                minibatch_cost = 0
                dW = np.zeros(self.W_.shape[0])
                db = 0
                # partition dataset into minibatches
                Xs, ys = X[start:start+self.minibatch_size], y[start:start+self.minibatch_size]
                for x_i, y_i in zip(Xs, ys):
                    # forward pass
                    a_i = self._forward(x_i)
                    # backward pass
                    dW_i, db_i = self._backward(x_i, y_i)
                    # accumulate cost and gradient
                    minibatch_cost += cost(a_i, y_i)
                    dW += dW_i
                    db += db_i
                # average cost and gradients over the actual minibatch size
                # (the last minibatch may be smaller than self.minibatch_size)
                dW = dW / len(Xs)
                db = db / len(Xs)
                # accumulate cost over the epoch
                minibatch_cost = minibatch_cost / len(Xs)
                epoch_cost += minibatch_cost
                # update weights
                self.W_ = self.W_ - self.learning_rate * dW
                self.b_ = self.b_ - self.learning_rate * db
            # record cost at end of each epoch
            self.costs_.append(epoch_cost)

    def _forward(self, x):
        # compute and cache intermediate values for the backward pass
        self.z = np.dot(x, self.W_) + self.b_
        self.a = sigmoid(self.z)
        return self.a

    def _backward(self, x, y):
        # compute gradients via the chain rule: dC/dw = dC/da * da/dz * dz/dw
        dW = dcost(self.a, y) * dsigmoid(self.z) * x
        db = dcost(self.a, y) * dsigmoid(self.z)
        return dW, db
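

# Illustrative numerical gradient check (an added sketch, not part of the class):
# compare the analytic gradient from _backward against a central finite
# difference of the cost for a single sample and a single weight.
_neuron = ArtificialNeuron(input_size=2)
_x, _y = np.array([1.0, 2.0]), 1
_neuron._forward(_x)
_dW, _ = _neuron._backward(_x, _y)
_W_plus, _W_minus = _neuron.W_.copy(), _neuron.W_.copy()
_W_plus[0] += _eps
_W_minus[0] -= _eps
_cost_plus = cost(sigmoid(np.dot(_x, _W_plus) + _neuron.b_), _y)
_cost_minus = cost(sigmoid(np.dot(_x, _W_minus) + _neuron.b_), _y)
assert np.allclose(_dW[0], (_cost_plus - _cost_minus) / (2 * _eps), atol=1e-6)


# Illustrative helper (an assumption, not defined in the class above): threshold
# the neuron's sigmoid activation at 0.5 to get a hard 0/1 class prediction.
# np.dot broadcasts over rows, so this works for a single sample or a whole batch.
def predict(neuron, X):
    return (sigmoid(np.dot(X, neuron.W_) + neuron.b_) >= 0.5).astype(int)
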
# Load the Iris dataset
iris = datasets.load_iris()
data = iris.data
target = iris.target

# Select only the Setosa and Versicolor classes (classes 0 and 1)
setosa_versicolor_mask = (target == 0) | (target == 1)
data = data[setosa_versicolor_mask]
target = target[setosa_versicolor_mask]

# Extract the sepal length and petal length features into a dataset
sepal_length = data[:, 0]
petal_length = data[:, 2]
X = np.vstack([sepal_length, petal_length]).T

# Train the artificial neuron
an = ArtificialNeuron(input_size=2)
# to recreate full-batch gradient descent, use minibatch_size=X.shape[0] like
# an = ArtificialNeuron(input_size=2, minibatch_size=X.shape[0])
an.train(X, target)
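
# Quick check of the fit (illustrative, using the predict helper defined above):
# Setosa and Versicolor are linearly separable in these two features, so the
# training accuracy should be at or near 1.0.
train_accuracy = np.mean(predict(an, X) == target)
print(f"training accuracy: {train_accuracy:.2f}")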
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
# Create a scatter plot of values
ax1.scatter(sepal_length[target == 0], petal_length[target == 0], label="Setosa", marker='o')
ax1.scatter(sepal_length[target == 1], petal_length[target == 1], label="Versicolor", marker='x')
# Plot the separating line: the decision boundary is where z = w1*x + w2*y + b = 0,
# i.e. y = (-w1 * x - b) / w2
w1, w2 = an.W_[0], an.W_[1]
b = an.b_
x_values = np.linspace(min(sepal_length), max(sepal_length), 100)
y_values = (-w1 * x_values - b) / w2
ax1.plot(x_values, y_values, label="Separating Line", color="k")
# Set plot labels and legend
ax1.set_xlabel("Sepal Length (cm)")
ax1.set_ylabel("Petal Length (cm)")
ax1.legend(loc='upper right')
ax1.set_title('Artificial Neuron Output')
# Plot neuron cost
ax2.plot(an.costs_, label="Error", color="r")
ax2.set_xlabel("Epoch")
ax2.set_ylabel("Cost")
ax2.legend(loc='upper left')
ax2.set_title('Artificial Neuron Cost')
# Show the plot
plt.show()