Skip to content

Instantly share code, notes, and snippets.

@eliaperantoni
Created October 29, 2017 12:56
Show Gist options
  • Save eliaperantoni/0ac2dcfcd2f54b7217e283e1519f910b to your computer and use it in GitHub Desktop.
Neural network
# imports
import numpy as np
import matplotlib.pyplot as pyplot
import scipy.special as scipyspec
%matplotlib inline
from IPython.display import clear_output
# neural network class definition
class NeuralNetwork:
    """A minimal 3-layer (input -> hidden -> output) feed-forward neural network.

    Uses a sigmoid activation on both the hidden and output layers and is
    trained by plain stochastic gradient descent with backpropagation.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Initialise layer sizes, learning rate and random weight matrices.

        Args:
            input_nodes: number of input-layer nodes.
            hidden_nodes: number of hidden-layer nodes.
            output_nodes: number of output-layer nodes.
            learning_rate: gradient-descent step size.
        """
        # set number of input, hidden and output nodes
        self.inodes = input_nodes
        self.hnodes = hidden_nodes
        self.onodes = output_nodes
        # set the learning rate
        self.lr = learning_rate
        # weights, sampled from N(0, 1/sqrt(fan-out)) — the usual heuristic
        # that keeps initial activations out of the sigmoid's flat tails
        self.wih = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = np.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # sigmoid activation; expit is already a callable, no lambda needed
        self.activation_function = scipyspec.expit

    def train(self, input_list, target_list):
        """Run one forward pass, then backpropagate and update both weight matrices.

        Args:
            input_list: sequence of input values, length ``self.inodes``.
            target_list: sequence of target outputs, length ``self.onodes``.
        """
        # convert lists to 2d column vectors
        input_layer = np.array(input_list, ndmin=2).T
        target_layer = np.array(target_list, ndmin=2).T
        # forward pass
        hidden_layer = self.activation_function(np.dot(self.wih, input_layer))
        output_layer = self.activation_function(np.dot(self.who, hidden_layer))
        # output-layer errors (target - actual)
        output_errors = target_layer - output_layer
        # hidden-layer errors: output errors split by the hidden->output weights
        hidden_errors = np.dot(self.who.T, output_errors)
        # gradient-descent weight updates; x * (1 - x) is the sigmoid derivative
        self.who += self.lr * np.dot(output_errors * output_layer * (1.0 - output_layer),
                                     hidden_layer.T)
        self.wih += self.lr * np.dot(hidden_errors * hidden_layer * (1.0 - hidden_layer),
                                     input_layer.T)

    def query(self, input_list):
        """Forward-propagate ``input_list`` and return the output-layer activations.

        Returns:
            ``(self.onodes, 1)`` column vector of sigmoid outputs in (0, 1).
        """
        # convert list to 2d column vector
        input_layer = np.array(input_list, ndmin=2).T
        hidden_layer = self.activation_function(np.dot(self.wih, input_layer))
        output_layer = self.activation_function(np.dot(self.who, hidden_layer))
        return output_layer
# number of input, hidden and output nodes (28x28 pixels in, 10 digit classes out)
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
epochs = 1
# learning rate
learning_rate = 0.1
# creating the network
n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# train on MNIST CSV, one record per line: "label,px0,px1,...,px783"
# context manager guarantees the file is closed even if reading fails
with open('mnist_train.csv', 'r') as data_f:
    data = data_f.readlines()
for e in range(epochs):
    for record in data:
        all_values = record.split(',')
        # rescale pixels from [0, 255] to [0.01, 1.00]; the 0.01 floor avoids
        # zero inputs, which would zero out their weight updates
        # (np.asarray(dtype=float) replaces np.asfarray, removed in NumPy 2.0)
        inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
        # all targets 0.01 except 0.99 at the labelled digit's index
        targets = np.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment