@afonsomatos
Created January 31, 2019 00:43
A minimal feed-forward neural network in C++ using Eigen: sigmoid activations, one weight matrix per adjacent pair of layers, and gradient-descent training.
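For orientation, these are the equations the code below implements (notation mine, not from the original gist). Each layer computes $a^{(l+1)} = \sigma(W^{(l)} a^{(l)})$ with the logistic sigmoid $\sigma(x) = 1/(1+e^{-x})$. Training starts from the output error $e = t - a^{(L)}$ and, walking backward one layer at a time, applies

$$g = e \odot a \odot (1 - a), \qquad \Delta W = \eta \, g \, a_{\text{prev}}^{\top}, \qquad e \leftarrow W^{\top} e$$

where $\eta$ is the learning rate, $\odot$ is the element-wise product, and the error is pushed back through the pre-update $W$. Note that the raw error $e$, not the gradient $g$, is what gets propagated; that simplification matches the code as written.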
// NeuralNetwork.cpp
#include <cmath>   // exp, used by expit below
#include "NeuralNetwork.h"

using namespace std;
using namespace Eigen;
NeuralNetwork::NeuralNetwork(vector<int> nodes, double learning_rate)
    : nodes{ nodes }, learning_rate{ learning_rate }, layers{ nodes.size() }
{
    // One weight matrix per adjacent pair of layers; weights[i] maps the
    // activations of layer i to layer i + 1. Entries are uniform in [-1, 1].
    for (size_t i = 0; i < layers - 1; ++i)
        weights.push_back(MatrixXd::Random(nodes[i + 1], nodes[i]));
}
void NeuralNetwork::train(VectorXd input, VectorXd target)
{
    // Forward pass: activations of every layer, input first, output last.
    vector<VectorXd> result = query(input);
    VectorXd error = target - result.back();

    vector<VectorXd>::reverse_iterator rt = result.rbegin();
    vector<MatrixXd>::reverse_iterator wt = weights.rbegin();

    VectorXd previous;
    ArrayXd z;
    MatrixXd delta;

    // Back-propagation: walk from the output layer back to the first hidden
    // layer (the input layer has no incoming weights to adjust).
    for (; rt != result.rend() - 1; ++rt, ++wt)
    {
        previous = *(rt + 1);            // activations feeding this layer
        z = rt->array();
        z *= error.array() * (1 - z);    // error * sigmoid'(a) = error * a * (1 - a)
        delta = learning_rate * z.matrix() * previous.transpose();
        // Propagate the error through the *old* weights before updating them.
        error = wt->transpose() * error;
        *wt += delta;
    }
}
// Logistic sigmoid; applied element-wise via unaryExpr in query().
double expit(double x)
{
    return 1.0 / (1.0 + exp(-x));
}
vector<VectorXd> NeuralNetwork::query(VectorXd output)
{
    // Forward pass: `output` starts as the input vector and is reused as the
    // running activation; every layer's activations are collected in order.
    vector<VectorXd> result{ output };
    for (MatrixXd& w : weights)
    {
        output = (w * output).unaryExpr(&expit);
        result.push_back(output);
    }
    return result;
}
// NeuralNetwork.h
#pragma once

#include <vector>
#include <Eigen/Dense>

struct NeuralNetwork
{
    NeuralNetwork(std::vector<int> nodes, double learning_rate);

    std::vector<int> nodes;                  // neurons per layer
    double learning_rate;
    size_t layers;                           // nodes.size()
    std::vector<Eigen::MatrixXd> weights;    // weights[i]: layer i -> i + 1

    void train(Eigen::VectorXd input, Eigen::VectorXd target);
    std::vector<Eigen::VectorXd> query(Eigen::VectorXd input);
};
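A minimal usage sketch follows: a hypothetical main.cpp (not part of the original gist) that trains the network on XOR, the classic test for a network with one hidden layer. The topology, learning rate, and epoch count are illustrative assumptions, and convergence depends on the random initialization. Eigen is header-only, so something like `g++ -I /path/to/eigen main.cpp NeuralNetwork.cpp` should build it.

// main.cpp -- hypothetical usage sketch, not part of the original gist.
#include <iostream>
#include <Eigen/Dense>
#include "NeuralNetwork.h"

int main()
{
    // 2 inputs, a hidden layer of 4, 1 output; learning rate 0.5 (assumed values).
    NeuralNetwork net({ 2, 4, 1 }, 0.5);

    // The four XOR input pairs as columns, with their targets.
    Eigen::MatrixXd inputs(2, 4);
    inputs << 0, 0, 1, 1,
              0, 1, 0, 1;
    Eigen::VectorXd targets(4);
    targets << 0, 1, 1, 0;

    for (int epoch = 0; epoch < 10000; ++epoch)
        for (int i = 0; i < 4; ++i)
        {
            Eigen::VectorXd t(1);
            t << targets(i);
            net.train(inputs.col(i), t);
        }

    // query() returns every layer's activations; .back() is the output layer.
    for (int i = 0; i < 4; ++i)
        std::cout << inputs.col(i).transpose() << " -> "
                  << net.query(inputs.col(i)).back()(0) << "\n";
}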