Skip to content

Instantly share code, notes, and snippets.

@harujoh
Last active March 21, 2016 09:54
Show Gist options
  • Save harujoh/4535b2f5dc4fd955ea54 to your computer and use it in GitHub Desktop.
using System;
using System.Linq;
using NumNet;
namespace MatrixNN
{
class Program
{
    static void Main()
    {
        // Build a 2-3-1 perceptron and train it on the XOR truth table.
        var mlp = new MLP(2, 3, 1);
        var trainingInputs = NdArray<double>.FromArray(new[,] { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } });
        var trainingTargets = NdArray<double>.FromArray(new[] { 0.0, 1.0, 1.0, 0.0 });

        mlp.Fit(trainingInputs, trainingTargets);

        // Run each of the four XOR patterns through the trained network.
        Console.WriteLine("--- predict ---");
        var patterns = new[] { new[] { 0.0, 0.0 }, new[] { 0.0, 1.0 }, new[] { 1.0, 0.0 }, new[] { 1.0, 1.0 } };
        for (var i = 0; i < patterns.Length; i++)
        {
            var sample = NdArray<double>.FromArray(patterns[i]);
            Console.WriteLine(sample + ":" + mlp.predict(sample));
        }
        Console.Read();
    }
}
class MLP
{
    /*
    Three-layered perceptron (input / hidden / output) trained with
    plain stochastic back-propagation over sigmoid activations.
    */
    NumNetMethods np = new NumNetMethods();
    private readonly int nin;   // number of input units
    private readonly int nhid;  // number of hidden units
    private readonly int nout;  // number of output units
    private NdArray<double> v;  // input->hidden weights, shape (nhid, nin + 1) incl. bias column
    private NdArray<double> w;  // hidden->output weights, shape (nout, nhid + 1) incl. bias column

    public MLP(int nInputUnits, int nHiddenUnits, int nOutputUnits)
    {
        nin = nInputUnits;
        nhid = nHiddenUnits;
        nout = nOutputUnits;
        // Weights start as uniform noise in [-1, 1].
        v = np.Random.Uniform(-1.0, 1.0, new[] { nhid, nin + 1 });
        w = np.Random.Uniform(-1.0, 1.0, new[] { nout, nhid + 1 });
    }

    // Train by picking one random sample per epoch and taking a
    // single gradient-descent step on both weight matrices.
    public void Fit(NdArray<double> inputs, NdArray<double> targets, double learningRate = 0.8, int epochs = 10000)
    {
        // Prepend the constant-1 bias entry to every sample (axis 1 = columns).
        inputs = AddBias(inputs, 1);
        for (var epoch = 0; epoch < epochs; epoch++)
        {
            // Stochastic step: draw one training sample at random.
            var index = np.Random.Randint(inputs.Shape[0]).ToString();
            var sample = inputs[index];
            var target = targets[index];

            // Forward pass: hidden activations (bias-extended), then output.
            var hidden = AddBias(Sigmoid(np.Dot(v, sample)));
            var output = Sigmoid(np.Dot(w, hidden));

            // Backward pass: output error, then error propagated to the hidden layer.
            // NOTE: hiddenDelta must be computed before w is updated below.
            var outputDelta = SigmoidDeriv(output) * (output - target);
            var hiddenDelta = SigmoidDeriv(hidden) * np.Dot(w.T, outputDelta);

            // Gradient-descent update of the hidden->output weights.
            var hidden2d = np.Atleast2d(hidden);
            var outputDelta2d = np.Atleast2d(outputDelta);
            w = w - learningRate * np.Dot(outputDelta2d.T, hidden2d);

            // Same for the input->hidden weights; the "1:, :" slice drops the
            // bias row, which has no incoming weights to adjust.
            var sample2d = np.Atleast2d(sample);
            var hiddenDelta2d = np.Atleast2d(hiddenDelta);
            v = v - learningRate * np.Dot(hiddenDelta2d.T, sample2d)["1:, :"];
        }
    }

    // Insert a constant-1 bias entry at position 0 along the given axis.
    NdArray<double> AddBias(NdArray<double> inputs, int axis = 0)
    {
        return np.Insert(inputs, 0, 1, axis);
    }

    /* Logistic activation function: 1 / (1 + e^-u), element-wise. */
    NdArray<double> Sigmoid(NdArray<double> u) => 1.0 / (1.0 + np.Exp(-u));

    // Sigmoid derivative expressed in terms of the sigmoid's output u.
    NdArray<double> SigmoidDeriv(NdArray<double> u) => u * (1 - u);

    // Forward pass only: bias-extend the input, then apply each layer in turn.
    public NdArray<double> predict(NdArray<double> input)
    {
        var hidden = AddBias(Sigmoid(np.Dot(v, AddBias(input))));
        return Sigmoid(np.Dot(w, hidden));
    }
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment