Last active
February 24, 2020 01:07
-
-
Save JuxhinDB/47f90374af3a6328f5090401da09128b to your computer and use it in GitHub Desktop.
A naive Kohonen-Grossberg Counterpropagation Network in Python
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
__author__ = 'Juxhin Dyrmishi Brigjaj' | |
import sys | |
import math | |
import random | |
import argparse | |
def parse_args(argv=None):
    """Parse command-line arguments for the network.

    Args:
        argv: Optional list of argument strings. When None (the default,
            preserving the original call signature) argparse falls back
            to sys.argv[1:].

    Returns:
        argparse.Namespace with learning_rate, csv_file, epoch, neurons.
    """
    parser = argparse.ArgumentParser(
        description='A naive Kohonen-Grossberg Counterpropagation Network in Python')
    parser.add_argument('-l', '--learning-rate', metavar='R', type=float, required=True,
                        help='Float indicating the learning rate (step) the network should use')
    parser.add_argument('-f', '--csv-file', type=str, required=True,
                        help='Path to CSV file containing dataset')
    # BUG FIX: the original combined required=True with default=...; argparse
    # ignores the default on a required option, so the defaults (1000, 3)
    # could never take effect.  Making these optional lets them apply.
    parser.add_argument('-e', '--epoch', type=int, default=1000,
                        help='Number of epochs to complete (default: 1000)')
    parser.add_argument('-n', '--neurons', type=int, default=3,
                        help='Number of neurons (units) to generate (default: 3)')
    return parser.parse_args(argv)
def normalise(rows: list = ()) -> list:
    """Scale each dataset row to a unit (length-1) vector.

    Each component is divided by the row's Euclidean length and rounded
    to four decimal places, matching the precision used elsewhere.

    Args:
        rows: Iterable of numeric sequences (the unnormalised dataset).

    Returns:
        A new list of normalised rows; the input is not modified.

    Raises:
        ValueError: If a row is all zeros (zero Euclidean length).  The
            original code raised an uncontrolled ZeroDivisionError here.
    """
    result = []
    for row in rows:
        length = math.sqrt(sum(x ** 2 for x in row))
        if length == 0.0:
            # Explicit, descriptive error instead of ZeroDivisionError.
            raise ValueError("Cannot normalise a zero-length vector: {}".format(row))
        result.append([round(x / length, 4) for x in row])
    return result
def generate_random_units(col_len: int, row_len: int) -> list:
    """Build a row_len x col_len matrix of random starting weights.

    Every entry is drawn from random.uniform(0.0, 1.0) and rounded to
    four decimal places, matching the precision used across the network.
    """
    return [
        [round(random.uniform(0.0, 1.0), 4) for _ in range(col_len)]
        for _ in range(row_len)
    ]
def calculate_nets(row, units):
    """Return the net activation of every unit for one input row.

    A unit's net is the dot product of the row with the unit's weight
    vector; each product and the final sum are rounded to four decimal
    places, mirroring the precision used elsewhere in the network.
    """
    return [
        round(sum(round(row[i] * weight, 4) for i, weight in enumerate(unit)), 4)
        for unit in units
    ]
def update_units(learning_rate: float, nets: list, row: list, units: list) -> None:
    """Move the winning unit's weights towards the input row, in place.

    Kohonen winner-take-all update: the unit with the largest net
    activation is pulled towards the input by learning_rate.

    Args:
        learning_rate: Step size controlling how far the winner moves.
        nets: Net activations, one per unit (output of calculate_nets).
        row: The (normalised) input vector.
        units: Weight matrix; the winner's row is mutated in place.

    Note: the original signature was annotated ``-> bool`` but the
    function never returns a value; the annotation is corrected to None.
    """
    # Winner-take-all: index of the highest activation (ties resolve to
    # the first occurrence, per list.index semantics).
    winner = nets.index(max(nets))
    for j, component in enumerate(row):
        units[winner][j] = round(
            units[winner][j] + learning_rate * (component - units[winner][j]), 4)
def main():
    """Train the counterpropagation network from command-line arguments.

    Loads the CSV dataset, normalises it, initialises random weights,
    and runs the winner-take-all training loop for the requested epochs,
    printing the weights before and after training.
    """
    args = parse_args()
    learning_rate = args.learning_rate

    unnormalised_dataset = []
    try:
        with open(args.csv_file, 'r') as csv_file:
            for line in csv_file:
                unnormalised_dataset.append([float(x) for x in line.split(',')])
    # BUG FIX: float() raises ValueError (not TypeError) on malformed text,
    # so the original handler could never fire.  Also exit instead of
    # silently continuing with a partial dataset.
    except ValueError as e:
        print("[!] FATAL: Dataset is malformed. Unable to parse values as floats.\n{}".format(str(e)))
        sys.exit(1)

    # Guard: the original crashed later at unnormalised_dataset[0] when the
    # file was empty or unreadable.
    if not unnormalised_dataset:
        print("[!] FATAL: Dataset is empty.")
        sys.exit(1)

    print("[+] Normalising dataset")
    rows = normalise(unnormalised_dataset)
    for row in rows:
        print('\t'.join([str(x) for x in row]))

    # Used to determine the number of columns in generate_random_units call,
    # assuming the dataset is consistent in width.
    unit_length = len(unnormalised_dataset[0])
    random_units = generate_random_units(unit_length, args.neurons)

    print("\n[+] Starting Weights:")
    for unit in random_units:
        print(','.join([str(x) for x in unit]))
    print()

    for i in range(1, args.epoch + 1):
        # Progress marker every 100 epochs to keep output manageable.
        if i % 100 == 0:
            print("[+] Running Epoch #{}".format(str(i)))
        for row in rows:
            nets = calculate_nets(row, random_units)
            update_units(learning_rate, nets, row, random_units)

    print("\n[+] Final Weights:")
    for unit in random_units:
        print(','.join([str(x) for x in unit]))


if __name__ == '__main__':
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Hi, thanks for the code. But where is the Grossberg (outstar) layer implemented in this file?