@rolisz
Created July 17, 2014 12:04
Neural network
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.base import clone
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from NeuralNetwork import NeuralNetwork
np.seterr(all='warn')
digits = load_digits()
X = digits.data
y = digits.target
X -= X.min()
X /= X.max()
def test_fit():
    nn = NeuralNetwork([64, 100, 10], 'relu', epochs=60000, learning_rate=0.05)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    # One-hot encode the targets so the 10-unit output layer can be trained directly
    labels_train = LabelBinarizer().fit_transform(y_train)
    labels_test = LabelBinarizer().fit_transform(y_test)
    nn.fit(X_train, labels_train)
    predictions = []
    for i in range(X_test.shape[0]):
        o = nn.predict(X_test[i])
        predictions.append(np.argmax(o))  # predicted class = index of the largest output
    print(confusion_matrix(y_test, predictions))
    print(classification_report(y_test, predictions))

test_fit()
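
Note: this gist predates scikit-learn 0.18, which moved train_test_split from sklearn.cross_validation to sklearn.model_selection (the old module was later removed). A minimal sketch of the same split on a current scikit-learn, keeping everything else above unchanged:

# Sketch for scikit-learn 0.18+, where train_test_split lives in sklearn.model_selection
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)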

NeuralNetwork.py
import numpy as np
from numpy.random import random as rand


class NeuralNetwork:
    # Activation functions and their derivatives, selected by name in __init__.
    # The derivatives take the stored activation a = f(z) as input, which is what fit() passes them.
    activations = {
        'tanh': lambda x: np.tanh(x),
        'relu': lambda x: np.maximum(x, 0),
    }
    derivatives = {
        'tanh': lambda x: 1.0 - x**2,
        'relu': lambda x: (np.sign(x) + 1) * 0.5,
    }
    def __init__(self, layers, activation='tanh', learning_rate=0.2, reg=0.0001,
                 epochs=10000):
        # layers: list of layer sizes, e.g. [64, 100, 10] for input, hidden and output
        self.activation = NeuralNetwork.activations[activation]
        self.activation_deriv = NeuralNetwork.derivatives[activation]
        self.learning_rate, self.epochs, self.reg = learning_rate, epochs, reg
        self.initialize_weights(layers)
    def initialize_weights(self, layers):
        # Small random weights in [-0.25, 0.25); every layer but the output gets an extra bias unit
        self.weights = []
        for i in range(0, len(layers) - 2):
            self.weights.append((2 * rand((layers[i] + 1, layers[i + 1] + 1)) - 1) * 0.25)
        self.weights.append((2 * rand((layers[-2] + 1, layers[-1])) - 1) * 0.25)
    def regularize(self, weights):
        # L2 weight decay: shrink each weight in proportion to its own value
        return self.reg * weights
    def fit(self, X, y):
        X = np.atleast_2d(X)
        # Append a constant bias input of 1 to every sample
        X = np.hstack((X, np.ones([1, X.shape[0]]).T))
        y = np.array(y)
        # Stochastic training: one randomly chosen sample per epoch
        for k in range(self.epochs):
            i = np.random.randint(X.shape[0])
            # Forward pass: store the activations of every layer
            a = [np.atleast_2d(X[i])]
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            # Backward pass: output error first, then propagate deltas layer by layer
            deltas = [(y[i] - a[-1]) * self.activation_deriv(a[-1])]
            for l in range(len(a) - 2, 0, -1):  # begin at the second to last layer
                deltas.append(deltas[-1].dot(self.weights[l].T) *
                              self.activation_deriv(a[l]))
            deltas.reverse()
            # Gradient step with weight decay: w += lr * (a^T delta - reg * w)
            for i in range(len(self.weights)):
                self.weights[i] += self.learning_rate * (
                    a[i].T.dot(deltas[i]) - self.regularize(self.weights[i])
                )
    def predict(self, X):
        # Append the same constant bias input of 1 that fit() uses during training
        X = np.append(X, 1)
        for l in range(0, len(self.weights)):
            X = self.activation(np.dot(X, self.weights[l]))
        return X
if __name__ == '__main__':
    # XOR sanity check: two inputs, one hidden layer of 5 units, one output
    nn = NeuralNetwork([2, 5, 1], 'relu', epochs=100000, learning_rate=0.5, reg=0)
    X = np.array([[0, 0],
                  [0, 1],
                  [1, 0],
                  [1, 1]])
    y = np.array([0, 1, 1, 0])
    nn.fit(X, y)
    for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
        print(i, nn.predict(i))