Skip to content

Instantly share code, notes, and snippets.

@Aakash-kaushik
Aakash-kaushik / header_files.cpp
Last active September 26, 2020 13:14
including the libraries and defining some namespaces
#include <mlpack/core.hpp>
#include <mlpack/core/data/split_data.hpp>
#include <mlpack/methods/ann/layer/layer.hpp>
#include <mlpack/methods/ann/ffn.hpp>
#include <ensmallen.hpp> /* The numerical optimization library that mlpack uses */
using namespace mlpack;
using namespace mlpack::ann;
// Namespace for the armadillo library(linear algebra library).
@Aakash-kaushik
Aakash-kaushik / helper_function.cpp
Created September 25, 2020 16:04
helper function to get labels
/**
 * Convert a matrix of per-class scores into a row of predicted labels.
 *
 * @param predOut Matrix whose columns are data points and whose rows are
 *                per-class scores (one column of scores per sample).
 * @return Row of 1-based predicted labels, one per column of predOut.
 */
arma::Row<size_t> getLabels(const arma::mat& predOut)
{
arma::Row<size_t> predLabels(predOut.n_cols);
for(arma::uword i = 0; i < predOut.n_cols; ++i)
{
// index_max() returns the row index of the largest score in the column;
// +1 shifts the 0-based index to the 1-based labels used elsewhere.
// (Original code called a nonexistent member `index_mat()`.)
predLabels(i) = predOut.col(i).index_max() + 1;
}
return predLabels;
}
// NOTE(review): this fragment of main() is truncated by the gist view; the
// closing brace and remaining body are not visible here.
int main()
{
constexpr double RATIO = 0.1; // ratio to divide the data in train and val set.
constexpr int MAX_ITERATIONS = 0; // set to zero to allow infinite iterations.
constexpr double STEP_SIZE = 1.2e-3;// step size for Adam optimizer.
constexpr int BATCH_SIZE = 50;
constexpr size_t EPOCH_I = 2; // number of training epochs (cycles over the data).
cout << "Reading data ..." << endl;
mat tempDataset;
// Load the raw MNIST CSV; the `true` flag makes Load fatal on failure.
data::Load("../data/mnist_train.csv", tempDataset, true);
@Aakash-kaushik
Aakash-kaushik / load_declare.cpp
Last active September 25, 2020 17:20
loading and splitting the data and declaring some constants
constexpr double RATIO = 0.1; // ratio to divide the data in train and val set.
constexpr int MAX_ITERATIONS = 0; // set to zero to allow infinite iterations.
constexpr double STEP_SIZE = 1.2e-3;// step size for Adam optimizer.
constexpr int BATCH_SIZE = 50; // number of data points per optimizer step.
constexpr size_t EPOCH = 2; // number of passes over the training data.
// Raw training data as loaded from disk; mlpack stores points as columns.
mat tempDataset;
// `true` => treat a failed load as a fatal error.
data::Load("train.csv", tempDataset, true);
mat tempTest; // raw test data, loaded the same way (load call truncated here).
@Aakash-kaushik
Aakash-kaushik / processing_data.cpp
Last active September 25, 2020 17:05
processing the data
// Drop the first column of the loaded matrices — presumably the CSV header
// after mlpack's column-major load; TODO confirm against the data files.
mat dataset = tempDataset.submat(0, 1, tempDataset.n_rows - 1, tempDataset.n_cols - 1);
mat test = tempTest.submat(0, 1, tempTest.n_rows - 1, tempTest.n_cols - 1);
// Split into training and validation sets; RATIO is the validation fraction.
mat train, valid;
data::Split(dataset, train, valid, RATIO);
// Row 0 holds the labels, so the feature matrices start at row 1.
const mat trainX = train.submat(1, 0, train.n_rows - 1, train.n_cols - 1);
const mat validX = valid.submat(1, 0, valid.n_rows - 1, valid.n_cols - 1);
const mat testX = test.submat(1, 0, test.n_rows - 1, test.n_cols - 1);
@Aakash-kaushik
Aakash-kaushik / model.cpp
Last active September 26, 2020 13:19
declaring the model architecture
// Feed-forward network with negative-log-likelihood loss and random weight
// initialization.
FFN<NegativeLogLikelihood<>, RandomInitialization> model;
// First layer: 5x5 convolution, 1 input map -> 6 output maps, stride 1,
// no padding. (Remaining arguments of this call are truncated in this view.)
model.Add<Convolution<>>(1, // Number of input activation maps.
6, // Number of output activation maps.
5, // Filter width.
5, // Filter height.
1, // Stride along width.
1, // Stride along height.
0, // Padding width.
0, // Padding height.
@Aakash-kaushik
Aakash-kaushik / training.cpp
Last active September 26, 2020 13:21
training the model
// Adam optimizer from ensmallen; argument order follows ens::Adam's
// constructor (stepSize, batchSize, beta1, beta2, eps, maxIterations,
// tolerance, shuffle).
ens::Adam optimizer(
STEP_SIZE, // Step size of the optimizer.
BATCH_SIZE, // Batch size. Number of data points that are used in each iteration.
0.9, // Exponential decay rate for the first moment estimates.
0.999, // Exponential decay rate for the weighted infinity norm estimates.
1e-8, // Value used to initialise the mean squared gradient parameter.
MAX_ITERATIONS, // Max number of iterations.
1e-8, // Tolerance.
true); // Shuffle the data points in each iteration.
@Aakash-kaushik
Aakash-kaushik / prediction.cpp
Created September 25, 2020 16:59
getting the predictions and accuracy
// Run the trained model over the training set and score its accuracy.
mat predOut;
model.Predict(trainX, predOut);
arma::Row<size_t> predLabels = getLabels(predOut);
// accu() counts matching labels; static_cast replaces the C-style cast.
double trainAccuracy = arma::accu(predLabels == trainY) / static_cast<double>(trainY.n_elem) * 100;
// Same measurement on the validation set, reusing predOut/predLabels.
model.Predict(validX, predOut);
predLabels = getLabels(predOut);
double validAccuracy = arma::accu(predLabels == validY) / static_cast<double>(validY.n_elem) * 100;
std::cout << "Accuracy: train = " << trainAccuracy << "%,"<< "\t valid = " << validAccuracy << "%" << std::endl;
// Buffer for predictions on the held-out test set (use truncated here).
mat testPredOut;
@Aakash-kaushik
Aakash-kaushik / model_save.cpp
Created September 25, 2020 17:06
saving the model
// Serialize the trained model to disk under the name "model"; the final
// `false` means a failed save is not treated as fatal.
mlpack::data::Save("model.bin", "model", model, false);
@Aakash-kaushik
Aakash-kaushik / load_model.cpp
Created September 25, 2020 18:58
Loading a model
// Deserialize a previously saved model. The FFN template parameters must
// match the type that was serialized with data::Save.
FFN<NegativeLogLikelihood<>, RandomInitialization> friend_model;
// mlpack's serialization API is data::Load (capital L) — the original
// lowercase `data::load` does not exist and would not compile.
mlpack::data::Load("model.bin", "model", friend_model);