Skip to content

Instantly share code, notes, and snippets.

@sbugrov
sbugrov / gutenberg.py
Last active November 29, 2023 10:17
# -*- coding: utf-8 -*-
# Sergei Bugrov
# 7-9-17
#
# Downloads all available books in the English language in .txt format from http://www.gutenberg.org,
# unpacks them from .zip archives, saves them to the ../books/ folder, and deletes the .zip files.
#
# usage : python gutenberg.py
#
vector <float> sigmoid (const vector <float>& m1) {
/* Returns the value of the sigmoid function f(x) = 1/(1 + e^-x).
Input: m1, a vector.
Output: 1/(1 + e^-x) for every element of the input matrix m1.
*/
const unsigned long VECTOR_SIZE = m1.size();
vector <float> output (VECTOR_SIZE);
@sbugrov
sbugrov / main.cpp
Last active January 2, 2023 05:32
//
// main.cpp
// mlperceptron
//
// Created by Sergei Bugrov on 7/1/17.
// Copyright © 2017 Sergei Bugrov. All rights reserved.
//
#include <iostream>
#include <vector>
int main(int argc, const char * argv[]) {
for (unsigned i = 0; i != 50; ++i) {
vector<float> pred = sigmoid(dot(X, W, 4, 4, 1 ) );
vector<float> pred_error = y - pred;
vector<float> pred_delta = pred_error * sigmoid_d(pred);
vector<float> W_delta = dot(transpose( &X[0], 4, 4 ), pred_delta, 4, 4, 1);
W = W + W_delta;
};
return 0;
}
@sbugrov
sbugrov / nn.cpp
Last active December 25, 2020 03:11
//
// nn.cpp
//
// To compile: g++ -o nn nn.cpp -std=c++11
// To run: ./nn
// Created by Sergei Bugrov on 4/20/18.
// Copyright © 2017 Sergei Bugrov. All rights reserved.
// Download dataset from: https://drive.google.com/file/d/1OdtwXHf_-2T0aS9HLBnxU3o-72mklCZY/view?usp=sharing
#include <iostream>
//
// onehiddenlayerperceptron.cu
// onehiddenlayerperceptron
//
// Created by Sergei Bugrov on 8/21/17.
// Copyright © 2017 Sergei Bugrov. All rights reserved.
//
#include <stdio.h>
@sbugrov
sbugrov / learn.cu
Last active October 27, 2018 12:21
// Runs 50 iterations of gradient descent for a one-hidden-layer perceptron
// entirely inside a single kernel launch, updating W0 and W1 in place.
//   X:      input matrix (X_h rows, X_w cols);  y: targets (X_h x y_w)
//   l1:     hidden-layer activations (X_h x l1_w); pred: outputs (X_h x y_w)
//   l_1_d / pred_d: deltas for the hidden and output layers
//   W0 / W1: weight matrices, updated in place each iteration
//   buffer: scratch space used for the sigmoid-derivative results
// NOTE(review): dDot, dSigmoid, dSigmoid_d, dMartix*, dDot_m1T_m2 and
// dDot_m1_m2T are device helpers defined elsewhere; the "dMartix" spelling
// is a typo in those definitions and must be preserved here to match.
// NOTE(review): no __syncthreads() is visible between the dependent steps
// below -- presumably the helpers synchronize internally; confirm.
__global__ void kFit( const float* X, const int X_w, const int X_h, const float* y, const int y_w, float* l1, const int l1_w, float* l_1_d, float* pred, float* pred_d, float* W0, float* W1, float* buffer) {
for (unsigned i = 0; i < 50; ++i) {
// Forward pass: apparently l1 = sigmoid(X . W0), pred = sigmoid(l1 . W1)
// (dDot seems to write its product into the 3rd argument -- confirm).
dSigmoid(dDot(X, W0, l1, X_h, X_w, l1_w), l1, X_h, l1_w);
dSigmoid(dDot(l1, W1, pred, X_h, l1_w, y_w), pred, X_h, y_w);
// Backward pass: pred_d = (y - pred) * sigmoid'(pred)
dMartixByMatrixElementwise(dMartixSubstractMatrix(y, pred, pred_d, X_h, y_w), dSigmoid_d(pred, buffer, X_h, y_w), pred_d, X_h, y_w );
// Hidden-layer delta: l_1_d = (pred_d . W1^T) * sigmoid'(l1)
dMartixByMatrixElementwise(dDot_m1_m2T(pred_d, W1, l_1_d, X_h, y_w, l1_w), dSigmoid_d(l1, buffer, X_h, l1_w), l_1_d, X_h, l1_w);
// Weight updates: W1 from l1^T . pred_d, W0 from X^T . l_1_d (whether the
// helper accumulates into or overwrites W* is decided by its definition).
dDot_m1T_m2( l1, pred_d, W1, X_h, l1_w, y_w );
dDot_m1T_m2( X, l_1_d, W0, X_h, X_w, l1_w );
}
}
// Updating the parameters
// SGD step: W <- W - lr * dW for each layer (lr, dW1..dW3 defined elsewhere
// in the full nn.cpp; this is an excerpt from inside its training loop).
W3 = W3 - lr * dW3;
W2 = W2 - lr * dW2;
W1 = W1 - lr * dW1;
// dW1 = X.T * dz1
// Gradient of the first layer's weights: batch input (BATCH_SIZE x 784),
// transposed, times the first-layer delta dz1 -> a 784 x 128 matrix.
// NOTE(review): as excerpted, this line reads dz1 before the dz1
// declaration below -- the gist preview shows these lines out of their
// original order; confirm against the full nn.cpp.
vector<float> dW1 = dot(transpose( &b_X[0], BATCH_SIZE, 784 ), dz1, 784, BATCH_SIZE, 128);
// dz1 = dz2 * W2.T * relu'(a1)
// First-layer delta: propagate dz2 back through W2 (transposed, 128 x 64
// as passed to transpose) and gate element-wise by ReLU'(a1).
vector<float> dz1 = dot(dz2, transpose( &W2[0], 128, 64 ), BATCH_SIZE, 64, 128) * reluPrime(a1);