Skip to content

Instantly share code, notes, and snippets.

// NOTE(review): this chunk reads bottom-up — every gradient (dW3, dz2, dW2,
// dz1, dW1, dyhat) is used on a line ABOVE the line that declares it, and the
// same statements appear again in forward order later in this file. This looks
// like a copy/scrape artifact; as written these lines will not compile in this
// order — verify against the original source.
// Updating the parameters
// SGD step: W -= lr * dW, presumably elementwise via the file's overloaded
// vector operators (operator definitions not visible in this chunk).
W3 = W3 - lr * dW3;
W2 = W2 - lr * dW2;
W1 = W1 - lr * dW1;
// dW1 = X.T * dz1
// dot(A, B, rows, inner, cols) appears to be a row-major matmul over
// flattened vector<float> matrices — TODO confirm against dot()'s definition.
vector<float> dW1 = dot(transpose( &b_X[0], BATCH_SIZE, 784 ), dz1, 784, BATCH_SIZE, 128);
// dz1 = dz2 * W2.T * relu'(a1)
// Trailing "* reluPrime(a1)" is presumably an elementwise (Hadamard) product
// via an overloaded operator* — definition not visible here.
vector<float> dz1 = dot(dz2, transpose( &W2[0], 128, 64 ), BATCH_SIZE, 64, 128) * reluPrime(a1);
// dW2 = a1.T * dz2
vector<float> dW2 = dot(transpose( &a1[0], BATCH_SIZE, 128 ), dz2, 128, BATCH_SIZE, 64);
// dz2 = dyhat * W3.T * relu'(a2)
vector<float> dz2 = dot(dyhat, transpose( &W3[0], 64, 10 ), BATCH_SIZE, 10, 64) * reluPrime(a2);
// dW3 = a2.T * dyhat
vector<float> dW3 = dot(transpose( &a2[0], BATCH_SIZE, 64 ), dyhat, 64, BATCH_SIZE, 10);
// Output-layer error: softmax output minus the label batch — presumably b_y is
// one-hot encoded (encoding not visible in this chunk; verify).
vector<float> dyhat = yhat - b_y;
// Elementwise ReLU activation: out[i] = max(z[i], 0).
//
// @param z  flattened input (shape-agnostic; applied independently per element)
// @return   a new vector of the same length with negatives clamped to zero
//
// Fixes vs. the visible original: the function's closing brace was missing in
// this span (truncated copy), and the output vector was grown by repeated
// push_back without reserving capacity first.
std::vector<float> relu(const std::vector<float>& z){
    std::vector<float> output;
    output.reserve(z.size());  // single allocation instead of repeated regrowth
    for (const float v : z) {
        // Negatives are clamped to zero; non-negatives pass through unchanged.
        output.push_back(v < 0.0f ? 0.0f : v);
    }
    return output;
}
// # Feed forward
// Network appears to be 784 -> 128 -> 64 -> 10 (MNIST-sized); activations are
// flattened vector<float> matrices with BATCH_SIZE rows — TODO confirm against
// the dot()/transpose() definitions, which are not visible in this chunk.
// a1 = relu(X * W1)
vector<float> a1 = relu(dot( b_X, W1, BATCH_SIZE, 784, 128 ));
// a2 = relu(a1 * W2)
vector<float> a2 = relu(dot( a1, W2, BATCH_SIZE, 128, 64 ));
// yhat = softmax(a2 * W3), normalized per row over 10 classes
vector<float> yhat = softmax(dot( a2, W3, BATCH_SIZE, 64, 10 ), 10);
// # Back propagation
// Output-layer error: softmax output minus the label batch — presumably b_y is
// one-hot encoded (encoding not visible in this chunk; verify).
vector<float> dyhat = yhat - b_y;
// dW3 = a2.T * dyhat
vector<float> dW3 = dot(transpose( &a2[0], BATCH_SIZE, 64 ), dyhat, 64, BATCH_SIZE, 10);
// dz2 = (dyhat * W3.T) multiplied elementwise by relu'(a2) — the trailing
// operator* is presumably an overloaded Hadamard product (definition unseen).
vector<float> dz2 = dot(dyhat, transpose( &W3[0], 64, 10 ), BATCH_SIZE, 10, 64) * reluPrime(a2);
// dW2 = a1.T * dz2
vector<float> dW2 = dot(transpose( &a1[0], BATCH_SIZE, 128 ), dz2, 128, BATCH_SIZE, 64);
@sbugrov
sbugrov / nn.cpp
Last active December 25, 2020 03:11
//
// nn.cpp
//
// To compile: g++ -o nn nn.cpp -std=c++11
// To run: ./nn
// Created by Sergei Bugrov on 4/20/18.
// Copyright © 2018 Sergei Bugrov. All rights reserved.
// Download dataset from: https://drive.google.com/file/d/1OdtwXHf_-2T0aS9HLBnxU3o-72mklCZY/view?usp=sharing
#include <iostream>