@LizardLeliel
Created June 25, 2015
Two multilayer perceptron neural networks, one using Theano and the other not.

# My first layered neural network thingy,
# Whooooo.
# No Theano just yet. Maybe Numpy.
# Numpy will require me to express everything
# with as little iteration as possible. That's
# what bugs me about that library >_<. You can
# only hope everything broadcasts correctly!
import numpy

def bisigmoid(x):
    # numpy.e, rather than the rough literal 2.71828 this used to hardcode
    return 2.0/(1.0 + numpy.e**(-x)) - 1.0
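
# Quick sanity checks (my addition): bisigmoid is centred at zero
# and bounded by (-1, 1), saturating for large inputs.
assert bisigmoid(0.0) == 0.0
assert -1.0 < bisigmoid(-10.0) < bisigmoid(10.0) < 1.0
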
def numpyLiteral(x):
    return numpy.array(x, dtype=numpy.float32)

# A base layer for the input layer, hidden layers,
# and output layers. The constructor takes:
#   the number of neurons in this layer (also the dimension of the output),
#   the W matrix (array of arrays of float) (2d),
#   the bias vector (array of floats) (1d),
#   and how the layer receives its input.
class Layer:
    def __init__(self, neuronAmount, weights, biases,
                 inputMethod=numpyLiteral, activation=bisigmoid):
        self.neuronAmount = neuronAmount
        self.weights = numpy.array(weights, dtype=numpy.float32)
        self.biases = numpy.array(biases, dtype=numpy.float32)
        self.inputs = inputMethod  # inputMethod should be a function
        self.activation = activation

    def calculate(self, *args):
        inputs = self.inputs(*args)
        # Linear combination of the inputs, then the nonlinearity.
        output = self.activation(numpy.dot(self.weights, inputs) + self.biases)
        return output


class MultilayerPerceptron:
    # layerAmount DOESN'T INCLUDE THE INPUT LAYER
    def __init__(self, layerAmount, neuronAmountList, weightsList,
                 biasesList, activation):
        layerList = []
        for i in range(layerAmount):
            layerList.append(
                Layer(
                    neuronAmountList[i],
                    weightsList[i],
                    biasesList[i],
                    # Note: in Theano we'd assign this to the output value
                    # of the last layer, as a variable. Because Theano is
                    # symbolic, that works even where it would normally
                    # be undefined.
                    numpyLiteral if i == 0 else layerList[i - 1].calculate,
                    activation
                )
            )
        self.layers = layerList
        # Wow, that kind of was most of it

    def calculate(self, *args):
        return self.layers[-1].calculate(*args)


if __name__ == '__main__':
    hiddenLayer = Layer(2, [[1.0, 1.0], [1.0, 1.0]], [0.0, -1.0])
    outputLayer = Layer(1, [[3.0, -2.5]], [-1.2], hiddenLayer.calculate)
    oneOne = outputLayer.calculate([1.0, 1.0])
    oneZero = outputLayer.calculate([1.0, 0.0])

    xorMLP = MultilayerPerceptron(
        2,
        [2, 1],
        # Clearly the line below shouldn't really be a literal
        [[[1.0, 1.0], [1.0, 1.0]], [[3.0, -2.5]]],
        [[0.0, -1.0], [-1.2]],
        bisigmoid
    )
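
    # With these hand-picked weights, the sign of the MLP output follows
    # XOR: weakly positive for [1,0] and [0,1], weakly negative for
    # [0,0] and [1,1].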
print("Layer output for [1,1]: " + str(oneOne[0]))
print("Layer output for [1,0]: " + str(oneZero[0]))
print("MLP output for [0,1]: " + str(xorMLP.calculate([0.0, 1.0])[0]))
print("MLP output for [0,0]: " + str(xorMLP.calculate([0.0, 0.0])[0]))
print("Hello, world!")
import numpy
import theano
import theano.tensor as tensor
#bisigmoid = 2*tensor.nnet.sigmoid + 1
def bisigmoid(x):
    return 2*tensor.nnet.sigmoid(x) - 1
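
# Aside: 2*sigmoid(x) - 1 is the same function as tanh(x/2), so
# tensor.tanh(x / 2) would be an equivalent activation.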


class Layer:
    # The length of biases should be the amount
    # of neurons in this layer. Input is symbolic.
    def __init__(self, weights, biases, inputs, activation=bisigmoid):
        self.weights = weights
        self.biases = biases
        # This only builds the symbolic expression; nothing is computed yet.
        self.linearProduct = tensor.dot(self.weights, inputs) + self.biases
        self.output = activation(self.linearProduct)
# a lot of the code in the layer constructor sample at
# http://deeplearning.net/tutorial/logreg.html is for generating
# initial weights/biases if none were given.
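
# A minimal sketch of that tutorial-style initialization, assuming tanh-like
# activations; makeSharedParams is my own illustrative name, not from the
# tutorial. Weight shape is (out, in) to match tensor.dot(weights, inputs).
def makeSharedParams(numberIn, numberOut, rng=numpy.random):
    bound = numpy.sqrt(6.0 / (numberIn + numberOut))
    weights = theano.shared(rng.uniform(low=-bound, high=bound,
                                        size=(numberOut, numberIn)))
    biases = theano.shared(numpy.zeros(numberOut))
    return weights, biases
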
# class MultilayerPerceptron:
#     # So much is done in the __init__ when it comes to theano
#     def __init__(self, initialInput, numberOfHiddens,)


class InitializedMultilayerPerceptron:
    def __init__(self, initialInput, layerAmount,
                 weightsList, biasesList, activation=bisigmoid):
        lastLayer = None
        self.weightsList = weightsList
        self.biasesList = biasesList
        for i in range(layerAmount):
            lastLayer = Layer(
                self.weightsList[i],
                self.biasesList[i],
                # The first layer reads the symbolic input; every later
                # layer reads the previous layer's symbolic output.
                initialInput if lastLayer is None else lastLayer.output,
                activation
            )
        self.output = lastLayer.output
        # and that's about it


if __name__ == '__main__':
    initialInput = tensor.vector('initialInput')

    weights = [
        theano.shared(numpy.array([[1.0, 1.0], [1.0, 1.0]])),
        theano.shared(numpy.array([[3.0, -2.5]]))
    ]
    biases = [
        theano.shared(numpy.array([0.0, -1.0])),
        theano.shared(numpy.array([-1.2]))
    ]

    xorMLP = InitializedMultilayerPerceptron(
        initialInput,
        2,
        weights,
        biases
    )

    # Compile the symbolic graph down to a callable function.
    xor = theano.function([initialInput], xorMLP.output)
print("xor mlp for [1,0]: " + str(xor(numpy.array([1.0,0.0]))))
print("xor mlp for [0,0]: " + str(xor(numpy.array([0.0,0.0]))))
print("xor mlp for [1,0]: " + str(xor(numpy.array([1.0,1.0]))))
print("xor mlp for [0,1]: " + str(xor(numpy.array([0.0,1.0]))))
print("Bonjour toute le monde!")