Skip to content

Instantly share code, notes, and snippets.

@ryancallihan
Created May 9, 2017 17:44
Show Gist options
  • Save ryancallihan/3abb61816fdcdacec3e0026ee1456b9c to your computer and use it in GitHub Desktop.
Perceptron Practice
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.5.2 (/usr/bin/python3.5)" project-jdk-type="Python SDK" />
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/Perceptron.iml" filepath="$PROJECT_DIR$/.idea/Perceptron.iml" />
</modules>
</component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="Unittests" />
</component>
</module>
import numpy as np
from random import *
from pylab import plot, ylim, show
import matplotlib
def unit_step(x):
    """Heaviside step activation: return 0 for negative input, 1 otherwise."""
    return 0 if x < 0 else 1


# OR-gate training set: each sample is ([in1, in2, bias], expected_label).
# The constant third component (1) acts as the bias input, so no separate
# bias term is needed in the weight update.
training_data = [
    (np.array([0, 0, 1]), 0),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 0, 1]), 1),
    (np.array([1, 1, 1]), 1),
]


def train_perceptron(data, eta=0.2, n=100, w=None):
    """Train a single perceptron with the classic error-correction rule.

    Args:
        data: list of (input_vector, expected_label) pairs; labels are 0/1.
        eta: learning rate applied to each weight update.
        n: number of training steps; each step samples one example at random.
        w: optional initial weight vector. When None, weights are drawn
           uniformly from [0, 1) to match the original behavior.

    Returns:
        (w, errors): the learned weight vector and the per-step error
        history (each entry is -1, 0, or 1 — useful for plotting
        convergence).
    """
    if w is None:
        w = np.random.rand(len(data[0][0]))
    w = np.asarray(w, dtype=float)  # float dtype so in-place updates don't truncate
    errors = []
    for _ in range(n):
        x, expected = choice(data)  # random.choice returns one (x, label) tuple
        error = expected - unit_step(np.dot(w, x))
        errors.append(error)
        w += eta * error * x  # error-correction update: move toward the target
    return w, errors


if __name__ == "__main__":
    # Train on the OR gate, report the learned classifications, and plot
    # the error history (guarded so importing this module has no side effects).
    w, errors = train_perceptron(training_data)
    for x, _ in training_data:  # The , _ will skip the second elem of a tuple.
        result = np.dot(x, w)
        print("{}: {} -> {}".format(x[:2], result, unit_step(result)))
    ylim([-1, 1])
    plot(errors)
    show()
#Is this updating?
#
# import numpy as np
#
# #In-class assignment 9-5-17
# #Create a vector w such that the unbiased classification
# #function will separate the following data points:
# #Perform perceptron learning on the following dataset. the initial weight vector is initialized to zero
#
# #ANSWER will use classify function to return dot product of w x, must make sure it's a number
# def classify(w, b, x):
# if w @ x + b >= 0:
# return 1
# else:
# return -1
#
# #What does classify result mean, need a place to store it
# def update(w, b, x, y):
# y_p = classify(w, b, x)
# if y_p != y:
# w += y_p * x
# #bias gets added in the same way as weight, except we add 1 or subtract 1 // after this not in class
# b += y_p
# else:
# w += y_p
# b += y_p
# return y_p
#
# #(<0.1, 0.9>, -1)
# #(<0.9, 0.2>, +1)
# #(<0.3, 0.6>, -1)
# #(<0.7, 0.3>, +1)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment