Skip to content

Instantly share code, notes, and snippets.

@tobspr
Created April 24, 2016 08:37
Show Gist options
  • Save tobspr/b2646085835fc244b8f9a0a38c90ab83 to your computer and use it in GitHub Desktop.
#include "NeuronalNetwork.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
/// Builds a fully-connected feed-forward network.
///
/// Layer layout: one input->hidden matrix, (numHiddenLayers - 1)
/// hidden->hidden matrices, and one final matrix mapping onto numOutputs,
/// i.e. numHiddenLayers + 1 weight matrices in total. Weights and biases
/// are initialized uniformly in [-weightScale, weightScale] via
/// MatrixXf::Random.
///
/// @param numInputs       number of input neurons (columns of the first matrix)
/// @param numOutputs      number of output neurons (rows of the last matrix)
/// @param numHiddenLayers number of hidden layers (0 means a single
///                        input->output matrix)
/// @param numLayerNodes   neurons per hidden layer
NeuronalNetwork::NeuronalNetwork(int numInputs, int numOutputs, int numHiddenLayers, int numLayerNodes)
{
_numInputs = numInputs;
_numOutputs = numOutputs;
_numHiddenLayers = numHiddenLayers;
_numLayerNodes = numLayerNodes;
// Init weights and biases: one matrix/bias per layer transition.
_weights.reserve(numHiddenLayers + 1);
_biases.reserve(numHiddenLayers + 1);
float weightScale = 1.0;
// Input layer. BUGFIX: with zero hidden layers the loop below never runs,
// so the first (and only) matrix must map directly onto the outputs —
// previously the network would emit numLayerNodes values instead of
// numOutputs in that case.
int firstLayerRows = (numHiddenLayers == 0) ? numOutputs : numLayerNodes;
_weights.push_back(MatrixXf::Random(firstLayerRows, numInputs) * weightScale);
_biases.push_back(MatrixXf::Random(firstLayerRows, 1) * weightScale);
// Hidden layers + output layer: the last iteration maps onto numOutputs.
for (int i = 0; i < numHiddenLayers; i++) {
int nextNodes = i == numHiddenLayers - 1 ? numOutputs : numLayerNodes;
_weights.push_back(MatrixXf::Random(nextNodes, numLayerNodes) * weightScale);
_biases.push_back(MatrixXf::Random(nextNodes, 1) * weightScale);
}
}
/// Runs a forward pass through the network.
///
/// Each layer computes W * a + b followed by an element-wise tanh
/// activation (applied to every layer, including the output layer).
///
/// @param inputs column vector/matrix of input activations; its row count
///               must match the column count of the first weight matrix
///               (presumably _numInputs — confirm against the constructor)
/// @return the activated values of the final layer
MatrixXf NeuronalNetwork::simulate(MatrixXf &inputs) {
MatrixXf activations = inputs;
// FIX: iterate with std::size_t / Eigen's signed Index instead of int —
// int can overflow against the 64-bit sizes these containers report.
for (std::size_t layer = 0; layer < _weights.size(); ++layer) {
MatrixXf summed = _weights[layer] * activations + _biases[layer];
// Element-wise tanh activation, expressed via unaryExpr rather than a
// hand-rolled coefficient loop.
activations = summed.unaryExpr([](float v) { return std::tanh(v); });
}
return activations;
}
/// Flattens the entire network into a single coefficient list:
/// every weight matrix first (in layer order, Eigen's linear/column-major
/// coefficient order), then every bias vector. The inverse of setCoeffs.
///
/// @return all weights followed by all biases, as doubles
vector<double> NeuronalNetwork::allCoeffsAsArray() {
    vector<double> coeffs;
    // Weights first.
    for (size_t layer = 0; layer < _weights.size(); ++layer) {
        const MatrixXf &w = _weights[layer];
        for (int idx = 0; idx < w.size(); ++idx) {
            coeffs.push_back(w(idx));
        }
    }
    // Then the biases.
    for (size_t layer = 0; layer < _biases.size(); ++layer) {
        const MatrixXf &b = _biases[layer];
        for (int idx = 0; idx < b.size(); ++idx) {
            coeffs.push_back(b(idx));
        }
    }
    return coeffs;
}
/// Random mutation helper: with probability `factor`, returns a uniform
/// value in [-0.013, +0.013]; otherwise returns exactly 0.0.
///
/// Uses the global rand() generator, so results depend on srand() seeding
/// done elsewhere.
///
/// @param factor mutation probability in [0, 1]
/// @return a small uniform perturbation, or 0.0
double getRand(float factor) {
    const double range = 0.013;
    // Roll once to decide whether this coefficient mutates at all.
    const double roll = (double)rand() / RAND_MAX;
    if (roll <= 1.0 - factor) {
        return 0.0;
    }
    // Map a second uniform sample from [0, 1] onto [-range, +range].
    const double unit = (double)rand() / RAND_MAX;
    return -range + unit * 2.0 * range;
}
/// Loads a flat coefficient list back into the network, optionally
/// perturbing each value — the inverse of allCoeffsAsArray() plus mutation.
///
/// Order must match allCoeffsAsArray(): all weights first, then all biases.
///
/// @param coefficients        flat list of weights followed by biases; must
///                            contain at least as many entries as the
///                            network has coefficients
/// @param randomizationFactor per-coefficient mutation probability passed
///                            through to getRand()
void NeuronalNetwork::setCoeffs(vector <double> coefficients, float randomizationFactor) {
// FIX: the original indexed coefficients[] blindly; a short vector caused
// out-of-bounds reads (undefined behavior). Verify the size up front.
size_t totalCoeffs = 0;
for (size_t i = 0; i < _weights.size(); i++) {
totalCoeffs += (size_t)_weights[i].size();
}
for (size_t i = 0; i < _biases.size(); i++) {
totalCoeffs += (size_t)_biases[i].size();
}
assert(coefficients.size() >= totalCoeffs && "setCoeffs: coefficient vector too short");
size_t currentIndex = 0;
// Fetch all weights
for (size_t i = 0; i < _weights.size(); i++) {
for (int k = 0; k < _weights[i].size(); k++) {
_weights[i](k) = coefficients[currentIndex] + getRand(randomizationFactor);
currentIndex ++;
}
}
// Fetch all biases
for (size_t i = 0; i < _biases.size(); i++) {
for (int k = 0; k < _biases[i].size(); k++) {
_biases[i](k) = coefficients[currentIndex] + getRand(randomizationFactor);
currentIndex ++;
}
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment