Perceptron Learning Algorithm
import random

import numpy as np

# Heaviside step activation: output 1 if the weighted sum is non-negative, else 0
unit_step = lambda x: 0 if x < 0 else 1

# Dummy data for the OR function; the constant third input acts as a bias term
training_data = [
    (np.array([0, 0, 1]), 0),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 0, 1]), 1),
    (np.array([1, 1, 1]), 1),
]

w = np.random.rand(3)   # random initial weights
errors = []             # per-iteration errors, kept for inspection
learning_rate = 0.2
n = 100                 # number of training iterations

for i in range(n):
    # pick a random example and apply the perceptron update rule:
    # w <- w + learning_rate * (expected - predicted) * x
    x, expected = random.choice(training_data)
    result = np.dot(w, x)
    error = expected - unit_step(result)
    errors.append(error)
    w += learning_rate * error * x

# print the prediction for every training example
for x, _ in training_data:
    result = np.dot(x, w)
    print("{}: {} -> {}".format(x[:2], result, unit_step(result)))
The same algorithm, refactored into a small Perceptron class:

import random

import numpy as np

class Perceptron:
    def __init__(self):
        self.errors = []             # per-iteration errors, kept for inspection
        self.w = np.random.rand(3)   # random initial weights

    def train(self, X, alpha, num_iters):
        # Train on (input, label) pairs X with learning rate alpha.
        self.training_data = X
        for i in range(num_iters):
            # pick a random example and apply the perceptron update rule:
            # w <- w + alpha * (expected - predicted) * x
            x, expected = random.choice(self.training_data)
            result = np.dot(self.w, x)
            error = expected - self.unit_step(result)
            self.errors.append(error)
            self.w += alpha * error * x

    def unit_step(self, val):
        # Heaviside step activation
        return 0 if val < 0 else 1

    def test(self):
        # print the prediction for every training example
        for x, _ in self.training_data:
            val = np.dot(x, self.w)
            print("{}: {} -> {}".format(x[:2], val, self.unit_step(val)))

training_data = [
    (np.array([0, 0, 1]), 0),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 0, 1]), 1),
    (np.array([1, 1, 1]), 1),
]

p = Perceptron()
p.train(training_data, 0.2, 100)
p.test()
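Once trained, the learned weights can also be applied to inputs outside the test loop. A small usage sketch (assuming the p trained above, and remembering to append the constant bias input):

x_new = np.array([1, 0, 1])   # candidate input with the bias term appended
prediction = p.unit_step(np.dot(p.w, x_new))
print("prediction for {}: {}".format(x_new[:2], prediction))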