@keshavsaharia
Last active April 21, 2017 13:14
A neural network implemented in Mathematica.
(* A neuron is a list, where the first element is the value, and the remaining elements are weights of
incoming connections to the neuron. Neurons are constructed by specifying the number of incoming synapses *)
Neuron[i_] := {0} ~Join~ ((2*RandomReal[] - 1) & /@ Range[i]);
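(* Quick sketch of the representation: a neuron with three incoming synapses is a four-element
   list, the value 0 followed by three random weights drawn from [-1, 1] *)
Length[Neuron[3]] == 4   (* True *)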
(* Create a neuron with a given value and list of incoming connection weights *)
Neuron[v_, w_] := Join[{v}, w];
(* A layer of the network is just a list of neurons *)
Layer[n_, i_] := Array[Neuron[i] &, n];
(* For simplicity, a network is a three-layer perceptron with an input layer, a hidden layer, and an output layer *)
Network[i_, h_, o_] := {Layer[i, 0], Layer[h, i], Layer[o, h]};
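(* A minimal usage sketch: build a 2-3-1 perceptron. The variable name "net" and the layer
   sizes are illustrative choices, and the weights are random on every run. *)
net = Network[2, 3, 1];
Dimensions /@ net   (* {{2, 1}, {3, 3}, {1, 4}}: neurons per layer and the length of each neuron *)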
(* Utility functions for neurons, layers, and networks *)
NeuronValue[n_] := First[n];                      (* value stored at the head of a neuron *)
NeuronWeights[n_] := Rest[n];                     (* all incoming weights of a neuron *)
NeuronWeight[n_, i_] := n[[i + 1]];               (* weight of the i-th incoming connection *)
LayerValues[l_] := First /@ l;                    (* values of every neuron in a layer *)
LayerWeights[l_, i_] := Part[#, i + 1] & /@ l;    (* weight from neuron i of the previous layer to each neuron in l *)
NetworkOutput[n_] := First /@ Last[n];            (* values of the output layer *)
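(* Illustrative accessor calls, assuming the "net" built above *)
NeuronWeights[net[[3, 1]]]   (* the three incoming weights of the single output neuron *)
LayerValues[net[[2]]]        (* hidden-layer values, all 0 before any forward pass *)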
(* Compute the next state of a network: set the input-layer values from the given inputs, then fold each later layer over the previous layer's new state *)
Compute[network_, inputs_] := Fold[#1 ~Join~ { Compute[#2, Last[#1]] } &,
{ MapThread[Join[{#1}, #2] &, {inputs, Rest /@ First[network]}] },
Rest[network]] /; Depth[network] == 4;
(* Compute the next state of every neuron in a layer, given the previous layer's state *)
Compute[layer_, values_] := Compute[#, values] & /@ layer /; Depth[layer] == 3;
(* Compute the next state of a neuron: apply the logistic sigmoid to the weighted sum of the previous layer's values *)
Compute[neuron_, previous_] := {LogisticSigmoid[Total[
    MapThread[#1 * First[previous[[#2]]] &,
      { Rest[neuron], Range[Length[neuron] - 1]}]]] }
  ~Join~ Rest[neuron] /; Depth[neuron] == 2;
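(* Forward-pass sketch, assuming the "net" built above: Compute returns a new network state
   with every neuron value updated, and NetworkOutput reads the output-layer values *)
state = Compute[net, {1, 0}];
NetworkOutput[state]   (* a one-element list holding a sigmoid activation strictly between 0 and 1 *)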
(* Backpropagation: given a computed network state, a target output vector, and a learning rate, return a network with the same values and updated weights *)
Propagate[network_, output_, learningRate_] := With[{
outputLayer = MapThread[
Propagate[#1, network[[2]], #2, learningRate] &,
{Last[network], output}],
hiddenLayer = network[[2]],
inputLayer = First[network]
},
{inputLayer, MapIndexed[Propagate[#1, First[#2],
inputLayer, outputLayer, output, learningRate] &,
hiddenLayer], outputLayer}
] /; Depth[network] == 4;
(* Update the incoming weights of an output neuron toward its target value *)
Propagate[neuron_, hiddenLayer_, target_, learningRate_] := Join[{NeuronValue[neuron]},
MapThread[
#1 - learningRate*
-NeuronValue[neuron]*(1 - NeuronValue[neuron])*
(target - NeuronValue[neuron])*#2 &,
{NeuronWeights[neuron], LayerValues[hiddenLayer]}]
];
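(* The update above is gradient descent on the squared error (1/2)(target - o)^2 of a sigmoid
   output o: dE/dw = -(target - o) o (1 - o) h for the hidden value h feeding the weight, so
   each weight moves by +learningRate (target - o) o (1 - o) h *)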
(* Update the incoming weights of a hidden neuron using the error propagated back from the output layer *)
Propagate[neuron_, index_, inputLayer_, outputLayer_, target_, learningRate_] := Join[{NeuronValue[neuron]},
MapIndexed[ #1 - learningRate *
NeuronValue[neuron]*(1 - NeuronValue[neuron])*
NeuronValue[inputLayer[[First[#2]]]]*
Total[MapThread[(#3 - #1)*-1*#1*(1 - #1)*#2 &, {
LayerValues[outputLayer],
LayerWeights[outputLayer, index],
target}]] &,
NeuronWeights[neuron]]];
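(* End-to-end training sketch using the definitions above: alternate forward passes and
   backpropagation on a single input/target pair. The input {1, 0}, target {1}, learning
   rate 0.5, and 1000 iterations are illustrative choices. *)
trained = Nest[Propagate[Compute[#, {1, 0}], {1}, 0.5] &, Network[2, 3, 1], 1000];
NetworkOutput[Compute[trained, {1, 0}]]   (* typically close to the target 1 after training *)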