Part of: https://medium.com/datadriveninvestor/math-neural-network-from-scratch-in-python-d6da9f29ce65
class Network:
    def __init__(self):
        self.layers = []
        self.loss = None
        self.loss_prime = None

    # add layer to network
    def add(self, layer):
        self.layers.append(layer)

    # set loss to use
    def use(self, loss, loss_prime):
        self.loss = loss
        self.loss_prime = loss_prime

    # predict output for given input
    def predict(self, input_data):
        # sample dimension first
        samples = len(input_data)
        result = []

        # run network over all samples
        for i in range(samples):
            # forward propagation
            output = input_data[i]
            for layer in self.layers:
                output = layer.forward_propagation(output)
            result.append(output)

        return result

    # train the network
    def fit(self, x_train, y_train, epochs, learning_rate):
        # sample dimension first
        samples = len(x_train)

        # training loop
        for i in range(epochs):
            err = 0
            for j in range(samples):
                # forward propagation
                output = x_train[j]
                for layer in self.layers:
                    output = layer.forward_propagation(output)

                # compute loss (for display purposes only)
                err += self.loss(y_train[j], output)

                # backward propagation
                error = self.loss_prime(y_train[j], output)
                for layer in reversed(self.layers):
                    error = layer.backward_propagation(error, learning_rate)

            # calculate average error on all samples
            err /= samples
            print('epoch %d/%d   error=%f' % (i + 1, epochs, err))
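The Network class only orchestrates layers: it assumes every layer object exposes forward_propagation(input) and backward_propagation(output_error, learning_rate). Below is a minimal sketch of compatible layers and an XOR training run, following the conventions of the linked article; the FCLayer and ActivationLayer names and the tanh/mse helpers are assumptions drawn from that article's companion code, not part of this gist.

# Sketch only: illustrative layers compatible with Network, assuming the
# article's conventions (row-vector samples, plain gradient descent).
import numpy as np

class FCLayer:
    # fully connected layer: output = input @ weights + bias
    def __init__(self, input_size, output_size):
        self.weights = np.random.rand(input_size, output_size) - 0.5
        self.bias = np.random.rand(1, output_size) - 0.5

    def forward_propagation(self, input_data):
        self.input = input_data
        self.output = np.dot(self.input, self.weights) + self.bias
        return self.output

    def backward_propagation(self, output_error, learning_rate):
        # propagate error to the previous layer and update parameters
        input_error = np.dot(output_error, self.weights.T)
        weights_error = np.dot(self.input.T, output_error)
        self.weights -= learning_rate * weights_error
        self.bias -= learning_rate * output_error
        return input_error

class ActivationLayer:
    # element-wise activation; no trainable parameters
    def __init__(self, activation, activation_prime):
        self.activation = activation
        self.activation_prime = activation_prime

    def forward_propagation(self, input_data):
        self.input = input_data
        self.output = self.activation(self.input)
        return self.output

    def backward_propagation(self, output_error, learning_rate):
        return self.activation_prime(self.input) * output_error

def tanh(x):
    return np.tanh(x)

def tanh_prime(x):
    return 1 - np.tanh(x) ** 2

def mse(y_true, y_pred):
    return np.mean(np.power(y_true - y_pred, 2))

def mse_prime(y_true, y_pred):
    return 2 * (y_pred - y_true) / y_true.size

# XOR example: 4 samples, each a 1x2 row vector
x_train = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
y_train = np.array([[[0]], [[1]], [[1]], [[0]]])

net = Network()
net.add(FCLayer(2, 3))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(3, 1))
net.add(ActivationLayer(tanh, tanh_prime))

net.use(mse, mse_prime)
net.fit(x_train, y_train, epochs=1000, learning_rate=0.1)
print(net.predict(x_train))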