@palashahuja
palashahuja / .vimrc
Last active February 24, 2020 18:47
minimal vimrc setup
" - For Neovim: stdpath('data') . '/plugged'
" - Avoid using standard Vim directory names like 'plugin'
call plug#begin('~/.vim/plugged')
Plug 'dracula/vim', { 'as': 'dracula' }
Plug 'jceb/vim-orgmode'
Plug 'tpope/vim-speeddating'
call plug#end()
set relativenumber
colorscheme dracula
@palashahuja
palashahuja / nn_mult.py
Created May 13, 2019 18:40
Neural Network Multiplication Approximator
# Reference
# =========
# "Why does deep and cheap learning work so well?"
# Henry W. Lin, Max Tegmark, and David Rolnick
# Dept. of Physics, Harvard University, Cambridge, MA 02138
# Dept. of Physics and Dept. of Mathematics, Massachusetts Institute of Technology, Cambridge, MA 02139
#
# The input x consists of two numbers; the network's output approximates their product.
import numpy as np
# multiplication approximator
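The preview ends right after the import. As background for the gist's title: the referenced paper builds multiplication out of just four hidden units whose activation has a nonzero second derivative at the origin. Below is a minimal sketch of that gate using softplus, with lam as an assumed scaling parameter; it is my own illustration, not the gist's training-based code.

import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def mult_gate(u, v, lam=0.01):
    # Four softplus units realise sigma(a+b) + sigma(-a-b) - sigma(a-b) - sigma(-a+b),
    # which is ~ 4 * sigma''(0) * a * b for small a, b (Taylor expansion around 0).
    # Scaling the inputs by lam and the output by 1 / (4 * sigma''(0) * lam**2)
    # makes the error vanish as lam -> 0; softplus''(0) = 1/4.
    a, b = lam * u, lam * v
    s = softplus(a + b) + softplus(-a - b) - softplus(a - b) - softplus(-a + b)
    return s / (4 * 0.25 * lam ** 2)

print(mult_gate(3.0, 7.0))   # approximately 21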
@palashahuja
palashahuja / gist:2c527d518f1e133c4d28
Last active March 8, 2016 20:42
dropconnect test implementation
template<
    typename PerformanceFunction,
    typename OutputLayerType,
    typename PerformanceFunctionType,
    typename MatType = arma::mat
>
void BuildDropConnectNetwork(MatType& trainData,
                             MatType& trainLabels,
                             MatType& testData,
                             MatType& testLabels,
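The preview stops mid-signature; it is the header of an mlpack-style test helper. As a rough, library-free reminder of what DropConnect itself does, here is a small numpy sketch. It is my own illustration, not the mlpack code; keep_prob and the mean-scaling shortcut at inference are assumptions.

import numpy as np

rng = np.random.default_rng(0)

def dropconnect_forward(x, W, b, keep_prob=0.5, training=True):
    # DropConnect zeroes individual weights (not whole activations, as dropout does)
    # with probability 1 - keep_prob during training; at inference the full weight
    # matrix is used, scaled by keep_prob to match the expected training-time output.
    if training:
        mask = (rng.random(W.shape) < keep_prob).astype(W.dtype)
        return np.maximum(0.0, x @ (W * mask) + b)   # ReLU layer
    return np.maximum(0.0, x @ (W * keep_prob) + b)

x = rng.standard_normal((4, 8))    # batch of 4 examples, 8 features
W = rng.standard_normal((8, 16))
b = np.zeros(16)
print(dropconnect_forward(x, W, b).shape)   # (4, 16)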
@palashahuja
palashahuja / gist:e6dd41c31a52f588fa0d
Created March 6, 2015 09:40
let's start with this ...
if evidence:
    for evidence_var in evidence:
        for factor in working_factors[evidence_var]:
            factor_reduced = factor.reduce('{evidence_var}_{state}'.format(evidence_var=evidence_var,
                                                                           state=evidence[evidence_var]),
                                           inplace=False)
            for var in factor_reduced.scope():
                working_factors[var].remove(factor)
                working_factors[var].add(factor_reduced)
        del working_factors[evidence_var]
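This loop conditions every factor that touches an evidence variable on the observed state, then swaps the reduced factor into the bookkeeping for the remaining variables. A toy, library-free sketch of the reduction step itself follows; the Factor class is a stand-in of my own, not pgmpy's API.

from itertools import product

class Factor:
    """Tiny stand-in for a discrete factor: values indexed by variable assignments."""
    def __init__(self, variables, values):
        self.variables = list(variables)   # e.g. ['A', 'B']
        self.values = dict(values)         # {(a, b): phi(a, b)}

    def scope(self):
        return list(self.variables)

    def reduce(self, var, state):
        """Condition on var = state, returning a smaller factor over the remaining variables."""
        idx = self.variables.index(var)
        remaining = [v for v in self.variables if v != var]
        reduced = {tuple(k for i, k in enumerate(key) if i != idx): val
                   for key, val in self.values.items() if key[idx] == state}
        return Factor(remaining, reduced)

# A factor over binary A, B; conditioning on B = 1 keeps only the B = 1 slice.
phi = Factor(['A', 'B'], {(a, b): a + 2 * b for a, b in product([0, 1], repeat=2)})
print(phi.reduce('B', 1).values)   # {(0,): 2, (1,): 3}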