Skip to content

Instantly share code, notes, and snippets.

@dyigitpolat
Last active June 9, 2017 06:48
Show Gist options
  • Save dyigitpolat/67d01d72e2aa94d8f8ec41079613602b to your computer and use it in GitHub Desktop.
//
// inside the main class
//
/**
 * Trains the network on the XOR function with stochastic gradient descent.
 * Each iteration draws a random input pair (a, b), feeds it to the network,
 * and backpropagates the derivative of the squared error against a ^ b.
 */
public void trainNetwork()
{
    for( int i = 0; i < 1000000; i++)
    {
        int r = (int) (Math.random() * 100000) % 4; // uniform in {0,1,2,3}
        int a = r % 2;
        int b = (r >> 1) % 2;
        int c = a ^ b; // training target: XOR of the two input bits

        // BUG FIX: the inputs must be set BEFORE reading the network output.
        // The original read output.output() first, so each step trained on
        // the previous iteration's inputs against the current target.
        input1.setInput( a);
        input2.setInput( b);

        double y = output.output();
        double dErr = (y - c); // dErr/dy of Err = 0.5*(y - c)*(y - c)
        output.backPropagate( dErr );
    }
}
//
// inside the neuron class
//
/**
 * Backpropagates the error derivative through this neuron: scales dErr by
 * the local activation derivative, recurses into each upstream synapse with
 * the pre-update weight, and then applies the gradient step to that weight.
 *
 * @param dErr derivative of the error with respect to this neuron's output
 */
public void backPropagate( double dErr)
{
    // Input neurons have no incoming synapses and no weights to adjust.
    if( isInputNeuron)
    {
        return;
    }

    double hj = inputSum(); // weighted sum of inputs (pre-activation)

    // Local gradient is identical for every synapse — hoisted out of the
    // loop instead of recomputing activationDerivative(hj)*dErr each pass.
    // (The original also computed an unused local yj = output(); removed.)
    double dErrj = activationDerivative( hj) * dErr;

    for( int i = 0; i < inputSynapses.size(); i++)
    {
        double xi = inputSynapses.get(i).output();
        // Propagate with the current (pre-update) weight, as standard
        // backprop requires, then update the weight. Beware: recursion.
        inputSynapses.get(i).backPropagate( dErrj * weights[i] );
        weights[i] -= learningRate * dErrj * xi;
    }
}
/**
 * Derivative of the sigmoid activation: s(x) * (1 - s(x)).
 * Evaluates the sigmoid once instead of twice as in the original.
 *
 * @param x pre-activation input
 * @return derivative of the sigmoid at x
 */
private double activationDerivative( double x) //sigmoid
{
    double s = activation(x);
    return s * (1.0 - s);
}
/**
 * Sigmoid (logistic) activation: 1 / (1 + e^(-arg)).
 *
 * @param arg pre-activation input
 * @return value in the open interval (0, 1)
 */
private double activation( double arg) //sigmoid
{
    double negExp = Math.exp( -arg);
    return 1.0 / (1 + negExp);
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment