Adaptable to datasets with varying numbers of features
import math


class ActivationFunction():
    def sigmoid(self, x):
        return 1 / (1 + math.exp(-x))

    def binary_hard_limit(self, x):
        if x <= 0:
            return 0
        elif x > 0:
            return 1
class Perceptron():
    def __init__(self, bias=0, learning_rate=0):
        self.weight = {}
        self.bias = bias
        self.learning_rate = learning_rate
        self.activation = ActivationFunction()

    def generate_weight(self, feature_len):
        # One zero-initialised weight per feature: w1, w2, ..., wN.
        for k in range(0, feature_len):
            idx = 'w' + str(k + 1)
            self.weight.update({idx: 0})

    def hebb_rule(self, feature):
        # Net input: y_in = sum(feature[k] * w[k]) + bias.
        y_in = 0
        for k in range(0, len(feature)):
            y_in = y_in + (feature[k] * self.weight['w' + str(k + 1)])
        y_in = y_in + self.bias
        return y_in
    def fit(self, dataset=[], epoch=1):
        dataset_len = len(dataset)
        # The number of weights follows the feature length of the dataset.
        self.generate_weight(len(dataset[0][0]))

        for i in range(0, epoch):
            print("EPOCH --- ", i + 1)
            print("--------------")

            for j in range(0, dataset_len):
                feature = dataset[j][0]
                target = dataset[j][1]

                y_in = self.hebb_rule(feature)
                y = self.activation.binary_hard_limit(y_in)

                # Update weights and bias only when the prediction is wrong.
                if y != target:
                    learning_error = target - y

                    for k in range(0, len(feature)):
                        idx = 'w' + str(k + 1)
                        new_weight = self.weight[idx] + (self.learning_rate * learning_error * feature[k])
                        self.weight[idx] = new_weight

                    new_bias = self.bias + (self.learning_rate * learning_error)
                    self.bias = new_bias

            print("weight: ", self.weight, " | BIAS : ", self.bias)
            print("--------------")
    def predict(self, feature):
        y_in = self.hebb_rule(feature)
        y = self.activation.binary_hard_limit(y_in)
        return y
BIAS = 0
LEARNING_RATE = 1
EPOCH = 10

# Leftover from the commented-out two-feature experiment below; fit() builds
# its own weights, so this dict is never used.
weight = {'w1': 0, 'w2': 0}

# dataset = [
#     ([0, 0], 0),
#     ([0, 1], 0),
#     ([1, 0], 0),
#     ([1, 1], 1)
# ]

dataset = [
    ([0, 0, 0], 0),
    ([0, 1, 0], 0),
    ([1, 0, 0], 0),
    ([1, 1, 0], 1),
    ([0, 0, 1], 0),
    ([0, 1, 1], 1),
    ([1, 0, 1], 1),
    ([1, 1, 1], 1)
]

model = Perceptron(bias=BIAS, learning_rate=LEARNING_RATE)
model.fit(dataset=dataset, epoch=EPOCH)

# print(model.predict([1, 1]))
# print(model.predict([0, 0]))
# print(model.predict([0, 1]))
# print(model.predict([1, 0]))

print(model.predict([1, 1, 1]))
print(model.predict([0, 0, 1]))
print(model.predict([0, 1, 0]))
print(model.predict([1, 0, 0]))
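To illustrate the gist's point that the perceptron adapts to any feature length, here is a minimal usage sketch that reuses the same class on the commented-out two-feature AND dataset; the names and_dataset and and_model are illustrative additions, not part of the original gist.

# Usage sketch (assumed names: and_dataset, and_model): train the same
# Perceptron on the two-feature AND data from the commented-out example above.
and_dataset = [
    ([0, 0], 0),
    ([0, 1], 0),
    ([1, 0], 0),
    ([1, 1], 1)
]

and_model = Perceptron(bias=BIAS, learning_rate=LEARNING_RATE)
and_model.fit(dataset=and_dataset, epoch=EPOCH)

print(and_model.predict([1, 1]))  # AND is linearly separable, so this should converge to 1
print(and_model.predict([1, 0]))  # and this to 0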