An Adaline binary classifier to illustrate the workings of basic neural units for the blog post.
import numpy as np


class Adaline(object):
    """ Adaline (Adaptive Linear Neuron) for binary classification.
        Minimises the sum-of-squares cost function using batch gradient descent. """

    def __init__(self, learn_rate=0.01, iterations=100):
        self.learn_rate = learn_rate
        self.iterations = iterations

    def fit(self, X, y, biased_X=False, standardised_X=False):
        """ Fit training data to our model """
        if not standardised_X:
            X = self._standardise_features(X)
        if not biased_X:
            X = self._add_bias(X)
        self._initialise_weights(X)
        self.cost = []

        for cycle in range(self.iterations):
            output_pred = self._activation(self._net_input(X))
            errors = y - output_pred
            # batch gradient descent step: w += learn_rate * X^T (y - Xw)
            self.weights += self.learn_rate * X.T.dot(errors)
            cost = (errors ** 2).sum() / 2.0
            self.cost.append(cost)
        return self

    def _net_input(self, X):
        """ Net input function (weighted sum) """
        return np.dot(X, self.weights)

    def predict(self, X, biased_X=False, standardised_X=False):
        """ Make predictions for the given data, X, using a unit step function """
        if not standardised_X:
            # apply the same feature scaling that was used during training
            X = (X - self._feature_means) / self._feature_stds
        if not biased_X:
            X = self._add_bias(X)
        # threshold at 0.5, the midpoint of the 0/1 target labels
        return np.where(self._activation(self._net_input(X)) >= 0.5, 1, 0)

    def _add_bias(self, X):
        """ Add a bias column of 1's to our data, X """
        bias = np.ones((X.shape[0], 1))
        return np.hstack((bias, X))

    def _initialise_weights(self, X):
        """ Initialise weights - normal distribution sample with standard dev 0.01 """
        random_gen = np.random.RandomState(1)
        self.weights = random_gen.normal(loc=0.0, scale=0.01, size=X.shape[1])
        return self

    def _standardise_features(self, X):
        """ Standardise our input features to zero mean and standard dev of 1,
            storing the training statistics for reuse at prediction time """
        self._feature_means = np.mean(X, axis=0)
        self._feature_stds = np.std(X, axis=0)
        return (X - self._feature_means) / self._feature_stds

    def _activation(self, X):
        """ Linear activation function - simply returns X """
        return X
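
The gist assumes a feature matrix X and binary labels y already exist (they come from the accompanying blog post). A minimal sketch for generating a toy linearly separable dataset so the snippet below runs standalone; the blob centres, sample counts and seed here are illustrative assumptions, not part of the original gist:

import numpy as np

# hypothetical toy data: two Gaussian blobs of 50 samples each,
# labelled 0 and 1 respectively
rng = np.random.RandomState(42)
X = np.vstack((rng.normal(loc=[-2.0, -2.0], scale=1.0, size=(50, 2)),
               rng.normal(loc=[2.0, 2.0], scale=1.0, size=(50, 2))))
y = np.hstack((np.zeros(50), np.ones(50)))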
import matplotlib.pyplot as plt

# create an Adaline classifier and train on our data
classifier = Adaline(learn_rate=0.001, iterations=50)
classifier.fit(X, y)

# plot the sum-of-squares cost after each epoch of training
plt.plot(range(1, len(classifier.cost) + 1), classifier.cost)
plt.title("Adaline: learn-rate 0.001")
plt.xlabel('Epochs')
plt.ylabel('Cost (Sum-of-Squares)')
plt.show()
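
As a quick sanity check (assuming the toy X and y sketched above), the training accuracy can be read off predict():

# fraction of training samples the fitted model classifies correctly
predictions = classifier.predict(X)
print("Training accuracy: {:.2%}".format(np.mean(predictions == y)))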