Skip to content

Instantly share code, notes, and snippets.

@Kaupenjoe
Created November 18, 2015 19:50
Show Gist options
  • Save Kaupenjoe/af36378f30d90216dfaa to your computer and use it in GitHub Desktop.
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <vector>

#include "matrix.h"
#include "PolynomRegression.h"

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
// Generates pA training samples of t = sin(x/2) on roughly [-pi, pi].
// Returns a 2-row table: row 0 = x values, row 1 = targets t.
// Index 0 of each row is unused (data occupies 1..pA) to match the
// 1-based knn::matrix indexing convention used elsewhere in this file.
std::vector<std::vector<double> > computeTrainingSin(unsigned int pA)
{
    std::vector<double> xs(pA + 1);
    std::vector<double> ts(pA + 1);
    std::vector<std::vector<double> > result(2);
    // unsigned loop index — the original's `int i` caused a
    // signed/unsigned comparison against pA
    for (unsigned int i = 1; i <= pA; ++i)
    {
        // Training grid is offset half a step from the test grid
        // (factor 2i-1 vs 2i); the +0.01 nudges points off exact
        // multiples of pi (and keeps x away from 0 for the sinc data).
        xs[i] = (((2.0 * i - 1.0) * M_PI) / pA) - M_PI + 0.01;
        ts[i] = std::sin(xs[i] / 2.0);
    }
    result[0] = xs;
    result[1] = ts;
    // NOTE: the original printed a "TEST SIN" debug line to stdout here;
    // that leftover instrumentation has been removed.
    return result;
}
// Generates pA test samples of t = sin(x/2) on roughly [-pi, pi].
// Returns a 2-row table: row 0 = x values, row 1 = targets t.
// Index 0 of each row is unused (data occupies 1..pA).
// The grid uses factor 2i (vs 2i-1 for training), so test points fall
// between the training points.
std::vector<std::vector<double> > computeTestSin(unsigned int pA)
{
    std::vector<double> xs(pA + 1);
    std::vector<double> ts(pA + 1);
    std::vector<std::vector<double> > result(2);
    // unsigned loop index — fixes the original's signed/unsigned comparison
    for (unsigned int i = 1; i <= pA; ++i)
    {
        xs[i] = (((2.0 * i * M_PI) / pA) - M_PI + 0.01);
        ts[i] = std::sin(xs[i] / 2.0);
    }
    result[0] = xs;
    result[1] = ts;
    return result;
}
// Generates pA training samples of the sinc-style target t = sin(4x)/x
// on roughly [-pi, pi].  Returns a 2-row table: row 0 = x, row 1 = t.
// Index 0 of each row is unused (data occupies 1..pA).
// The +0.01 offset in the grid keeps x away from 0, so the division
// below cannot hit an exact zero for these sample points.
std::vector<std::vector<double> > computeTrainingSinc(unsigned int pA)
{
    std::vector<double> xs(pA + 1);
    std::vector<double> ts(pA + 1);
    std::vector<std::vector<double> > result(2);
    // unsigned loop index — fixes the original's signed/unsigned comparison
    for (unsigned int i = 1; i <= pA; ++i)
    {
        // same half-step-offset grid as computeTrainingSin
        xs[i] = (((2.0 * i - 1.0) * M_PI) / pA) - M_PI + 0.01;
        ts[i] = std::sin(4 * xs[i]) / xs[i];
    }
    result[0] = xs;
    result[1] = ts;
    return result;
}
// Generates pA test samples of the sinc-style target t = sin(4x)/x
// on roughly [-pi, pi].  Returns a 2-row table: row 0 = x, row 1 = t.
// Index 0 of each row is unused (data occupies 1..pA).  The grid uses
// factor 2i (vs 2i-1 for training); +0.01 keeps x off exact zero.
std::vector<std::vector<double> > computeTestSinc(unsigned int pA)
{
    std::vector<double> xs(pA + 1);
    std::vector<double> ts(pA + 1);
    std::vector<std::vector<double> > result(2);
    // unsigned loop index — fixes the original's signed/unsigned comparison
    for (unsigned int i = 1; i <= pA; ++i)
    {
        xs[i] = (((2.0 * i * M_PI) / pA) - M_PI + 0.01);
        ts[i] = std::sin(4 * xs[i]) / xs[i];
    }
    result[0] = xs;
    result[1] = ts;
    return result;
}
// Driver: fits polynomials of degree 0..MMax to the sin(x/2) and
// sin(4x)/x data sets and writes one line per degree,
//   "<degree> <training error> <test error>",
// to SinError.txt and SincError.txt respectively.
//
// argv[1] (optional): number of samples P        (default 11)
// argv[2] (optional): maximum polynomial degree  (default 19)
int main(int argc, char** argv) {
    knn::init();
    const unsigned int P = argc > 1 ? (unsigned)std::atoi(argv[1]) : 11;
    const unsigned int MMax = argc > 2 ? (unsigned)std::atoi(argv[2]) : 19;

    std::ofstream sinOutL("SinError.txt", std::ios::out);
    std::ofstream sincOutL("SincError.txt", std::ios::out);
    // Robustness fix: the original never checked whether the output
    // files could actually be created.
    if (!sinOutL || !sincOutL) {
        std::cerr << "error: could not open SinError.txt / SincError.txt" << std::endl;
        return 1;
    }

    // sin(x/2) data set
    for (unsigned int mL = 0; mL <= MMax; ++mL) {
        PolynomRegression regressorL(mL);
        // fit on the training grid ...
        std::vector<std::vector<double> > trainingDataL = computeTrainingSin(P);
        regressorL.setXandT(trainingDataL[0], trainingDataL[1]);
        regressorL.computeAandBandW();
        sinOutL << mL << " " << regressorL.error() << " ";
        // ... then evaluate the same weights on the offset test grid
        std::vector<std::vector<double> > testDataL = computeTestSin(P);
        regressorL.setXandT(testDataL[0], testDataL[1]);
        sinOutL << regressorL.error() << std::endl;
    }
    // NOTE: a leftover "TRAINING DATA" debug print to stdout was removed
    // from the loop above.

    // sin(4x)/x data set
    for (unsigned int mL = 0; mL <= MMax; ++mL) {
        PolynomRegression regressorL(mL);
        std::vector<std::vector<double> > trainingDataL = computeTrainingSinc(P);
        regressorL.setXandT(trainingDataL[0], trainingDataL[1]);
        regressorL.computeAandBandW();
        sincOutL << mL << " " << regressorL.error() << " ";
        std::vector<std::vector<double> > testDataL = computeTestSinc(P);
        regressorL.setXandT(testDataL[0], testDataL[1]);
        sincOutL << regressorL.error() << std::endl;
    }

    sinOutL.close();
    sincOutL.close();
    return 0;
}
#ifndef KNN1_POLYNOMREGRESSION_H
#define KNN1_POLYNOMREGRESSION_H
#include <iostream>
#include "matrix.h"
using namespace std;
// Least-squares polynomial regression of a fixed degree mE.
// Usage: construct with the degree, call setXandT() with parallel sample
// vectors (1-based: index 0 is unused), computeAandBandW() to solve for
// the weights, then y() to evaluate and error() for the fit error.
class PolynomRegression {
public:
// mA: model complexity, i.e. the highest polynomial degree fitted
PolynomRegression(unsigned int mA);
// Stores copies of the sample inputs xA and targets tA (1-based; slot 0 unused).
void setXandT(std::vector<double>xA,std::vector<double>tA);
// Builds the normal-equation matrices A and b, inverts A, and solves w = A^-1 b.
void computeAandBandW(void);
// Evaluates the fitted polynomial at xA using the current weights wE.
inline double y(double xA);
// Sum of squared errors of y() against the stored targets.
double error(void);
private:
std::vector<double> xInE; // sample inputs (1-based; index 0 unused)
std::vector<double> tInE; // sample targets, parallel to xInE
knn::matrix AE; //x^k+m matrix
knn::matrix bE; //tp*xp^k Matrix
knn::matrix wE; //weight Matrix
unsigned int mE; //model complexity
};
// Records the model complexity (highest polynomial degree) for later fits;
// all other members are default-constructed.
PolynomRegression::PolynomRegression(unsigned int mA)
    : mE(mA)
{
}
void PolynomRegression::setXandT(std::vector<double> xA, std::vector<double> tA)
{
xInE=xA;
tInE=tA;
}
// Evaluates the fitted polynomial at xA:
//   y(x) = sum_{m=0..mE} w[m] * x^m
// The weight matrix wE is 1-based (knn::matrix convention), hence m+1.
// (Declared inline in the class, so the header-only definition is fine.)
double PolynomRegression::y(double xA)
{
    double tempY = 0.0;
    // unsigned loop index — the original's `int m` caused a
    // signed/unsigned comparison against mE
    for (unsigned int m = 0; m <= mE; ++m)
    {
        // natural call syntax instead of the verbose wE.operator()(...)
        tempY += wE(m + 1, 1) * std::pow(xA, m);
    }
    return tempY;
}
// Sum-of-squares error over the current sample set:
//   E = sum_p (y(x_p) - t_p)^2
// Samples are stored 1-based, so the valid indices are 1 .. size()-1.
double PolynomRegression::error(void)
{
    double tempErr = 0.0;
    // BUG FIX: the original looped with `p <= xInE.size()`, reading one
    // element past the end of both xInE and tInE (undefined behavior).
    // Every other loop in this file correctly uses `p < size()`.
    for (std::size_t p = 1; p < xInE.size(); ++p)
    {
        const double diff = this->y(xInE[p]) - tInE[p];
        tempErr += diff * diff; // cheaper and clearer than pow(diff, 2)
    }
    return tempErr;
}
// Solves the least-squares normal equations for the polynomial weights:
//   A w = b, with A(m,k) = sum_p x_p^(k+m) and b(k) = sum_p t_p * x_p^k,
// then w = A^-1 * b.
// Matrix indices are 1-based (knn::matrix convention), hence the +1s;
// sample index 0 is unused, so data lives at 1 .. size()-1.
void PolynomRegression::computeAandBandW(void)
{
    const std::size_t nSamples = xInE.size();
    // Temporary matrices with dimension mE+1 (coefficients for x^0 .. x^mE)
    knn::matrix tempAE(mE + 1, mE + 1);
    knn::matrix tempBE(mE + 1, 1);
    // unsigned loop indices — the original's `int` counters caused
    // signed/unsigned comparisons against mE
    for (unsigned int k = 0; k <= mE; ++k)
    {
        for (unsigned int m = 0; m <= mE; ++m)
        {
            // A(m,k) = sum over samples of x_p^(k+m)
            for (std::size_t p = 1; p < nSamples; ++p)
            {
                tempAE(m + 1, k + 1) += std::pow(xInE[p], k + m);
            }
        }
        // b(k) = sum over samples of t_p * x_p^k
        for (std::size_t p = 1; p < nSamples; ++p)
        {
            tempBE(k + 1, 1) += tInE[p] * std::pow(xInE[p], k);
        }
    }
    AE = tempAE;
    bE = tempBE;
    // Invert A in place, then solve for the weights: w = A^-1 * b.
    // NOTE: the original computed AE * bE twice (via a tempWE and then
    // again directly into wE) and dumped per-iteration debug output to
    // "log.txt" on every call; both redundancies have been removed.
    AE.invert();
    wE = AE * bE;
}
#endif KNN1_POLYNOMREGRESSION_H
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment