@Tumurtogtokh
Last active March 19, 2019 00:27
Python Code snippet for: https://wp.me/p5EUYy-fq
import numpy as np
from scipy.special import expit


class NeuralNetwork(object):
    '''
    Compact 3-layer neural network
    (input -> hidden -> output)
    '''

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        self.i_nodes = input_nodes
        self.h_nodes = hidden_nodes
        self.o_nodes = output_nodes
        self.alpha = learning_rate
        # Weight matrices: input -> hidden and hidden -> output
        self.wih = np.random.normal(0.0, pow(self.i_nodes, -0.5), (self.h_nodes, self.i_nodes))
        self.who = np.random.normal(0.0, pow(self.h_nodes, -0.5), (self.o_nodes, self.h_nodes))
        # Activation function: expit (sigmoid)
        self.activation_func = lambda x: expit(x)

    def train(self, input_list, target_list):
        inputs = np.array(input_list, ndmin=2).T
        targets = np.array(target_list, ndmin=2).T
        # Signals into the hidden layer
        hidden_inputs = np.dot(self.wih, inputs)
        # Signals out of the hidden layer
        hidden_outputs = self.activation_func(hidden_inputs)
        # Signals into the output layer
        final_inputs = np.dot(self.who, hidden_outputs)
        # Signals out of the output layer
        outputs = self.activation_func(final_inputs)
        # Error at the output layer
        output_errors = targets - outputs
        # Error back-propagated to the hidden layer
        hidden_errors = np.dot(self.who.T, output_errors)
        # Update weights between the hidden and output layers
        self.who += self.alpha * np.dot((output_errors * outputs * (1.0 - outputs)),
                                        np.transpose(hidden_outputs))
        # Update weights between the input and hidden layers
        self.wih += self.alpha * np.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
                                        np.transpose(inputs))

    def query(self, input_list):
        inputs = np.array(input_list, ndmin=2).T
        # Signals into the hidden layer
        hidden_inputs = np.dot(self.wih, inputs)
        # Signals out of the hidden layer
        hidden_outputs = self.activation_func(hidden_inputs)
        # Signals into the output layer
        final_inputs = np.dot(self.who, hidden_outputs)
        # Signals out of the output layer
        outputs = self.activation_func(final_inputs)
        return outputs
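A minimal usage sketch follows. It is not part of the original gist: the layer sizes, learning rate, random training data, and epoch count are illustrative assumptions, chosen only to show how the class is instantiated, trained one sample at a time, and queried.

# Minimal usage sketch (illustrative values, not from the original gist):
# a 3-input, 5-hidden, 2-output network trained on random data.
nn = NeuralNetwork(input_nodes=3, hidden_nodes=5, output_nodes=2, learning_rate=0.1)

# Hypothetical training data: 100 random samples with random targets in (0, 1)
X = np.random.rand(100, 3)
y = np.random.rand(100, 2) * 0.98 + 0.01  # keep targets away from the sigmoid extremes

for epoch in range(5):
    for sample, target in zip(X, y):
        nn.train(sample, target)

print(nn.query([0.2, 0.5, 0.9]))  # column vector of 2 output activations

Note that train() performs a single stochastic-gradient-descent step per call, so the loop above feeds samples one at a time rather than in batches.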