@nstrayer
Created October 24, 2017 22:00
A quick and dirty demo of a neural network implemented in vanilla JavaScript (plus statdists.js).
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"const sd = require('statdists')"
]
},
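{
"cell_type": "markdown",
"metadata": {},
"source": [
"The notebook leans on a handful of small helpers from `statdists`: `rnorm`, `emptyArr`, `vsub`, and `sum`. If that package isn't installed, the cell below is a minimal stand-in sketched from how those helpers are used later in the notebook (it is not taken from the `statdists` source, so treat the assumed behavior as exactly that). The `sd_fallback` name is my own; to use it, swap it in for the `require` above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"// Minimal stand-in for the pieces of statdists used in this notebook.\n",
"// Assumptions: rnorm(n) returns n standard-normal draws (Box-Muller here),\n",
"// emptyArr(n) returns an array of length n that map/reduce will visit,\n",
"// vsub subtracts two arrays elementwise, and sum totals an array.\n",
"const sd_fallback = {\n",
"  rnorm: n => Array.from({length: n}, () =>\n",
"    Math.sqrt(-2 * Math.log(1 - Math.random())) * Math.cos(2 * Math.PI * Math.random())),\n",
"  emptyArr: n => new Array(n).fill(null),\n",
"  vsub: (a, b) => a.map((x, i) => x - b[i]),\n",
"  sum: arr => arr.reduce((total, x) => total + x, 0),\n",
"};"
]
},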
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Functions to initialize, forward propigate, back propigate, and update weights for a network."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"function make_neuron(n_parents, act_func, label){\n",
" return {\n",
" weights: sd.rnorm(n_parents + 1), // we add one for the bias term here.\n",
" activation: null, // activation value is pre-activation function value\n",
" output: null, // output is post activation function \n",
" error: null, // error before multiplied by deriv of activation function\n",
" delta: null, // error after multiplied by deriv of activation funtion\n",
" act_func, // name of activation function\n",
" label, \n",
" };\n",
"};\n",
"\n",
"function make_layer(n_neurons, n_parents, act_func, label = 'hidden'){\n",
" return sd.emptyArr(n_neurons).map(neuron => make_neuron(n_parents, act_func, label))\n",
"};\n",
"\n",
"function initialize_network({n_inputs, n_hidden, n_outputs}){\n",
" const network = [];\n",
" const hidden_layer = make_layer(n_hidden, n_inputs, 'relu', 'hidden');\n",
" const output_layer = make_layer(n_outputs, n_hidden, 'sigmoid', 'output');\n",
" return [hidden_layer, output_layer];\n",
"};\n",
"\n",
"// Function that takes a given array of weights and corresponding inputs for\n",
"// some neuron and calculates its activation value based upon those. \n",
"// This assumes that weights.length == inputs.length + 1, because of added bias/intercept.\n",
"function calc_activation(weights, inputs){\n",
" // bias is the last weight in our weights array\n",
" // so we append a 1 to the end of our inputs.\n",
" return [...inputs, 1].reduce((sum, inp, i) => sum + inp*weights[i], 0)\n",
"}\n",
"\n",
"function activation_func(activation, type){\n",
" switch (type){\n",
" case \"relu\":\n",
" return activation > 0 ? activation: 0;\n",
" case \"sigmoid\":\n",
" return 1 / (1 + Math.exp(-activation));\n",
" default: \n",
" return activation; \n",
" }\n",
"}\n",
"\n",
"\n",
"function activation_func_deriv(output, type){\n",
" switch (type){\n",
" case \"relu\":\n",
" return output > 0 ? 1: 0;\n",
" case \"sigmoid\":\n",
" return output * (1 - output);\n",
" default: \n",
" return output; \n",
" }\n",
"}\n",
"\n",
"// takes a set of data inputs and a given network\n",
"// and forward propigates through network, \n",
"// returns a network of same size/shape as original\n",
"// but with activation info filled out.\n",
"function forward_prop(data_input, network){\n",
"// console.log('forward propigation')\n",
" // these are the actual data entering the model\n",
" let inputs = data_input;\n",
" \n",
" // map over each layer in the network\n",
" return network.map((layer, i) => { \n",
" \n",
" // holder for what our inputs will be at the next layer (results of this one)\n",
" const layer_output = [];\n",
" \n",
" // map over every neuron to get next layer's input.\n",
" const updated_layer = layer.map(neuron => { \n",
" // activate nueron\n",
" const activation = calc_activation(neuron.weights, inputs);\n",
"\n",
" // squash with non-linear activation function\n",
" const output = activation_func(activation, neuron.act_func); \n",
"\n",
" // update the inputs for next layer with current layer's output\n",
" layer_output.push(output);\n",
" \n",
" // return the newly activated neuron\n",
" return Object.assign({}, neuron, {activation, output});\n",
" });\n",
" // replace inputs with layer's outputs for next layer\n",
" inputs = layer_output;\n",
" \n",
" // store layer in map output\n",
" return updated_layer\n",
" })\n",
"};\n",
"\n",
"// takes an activated network and returns it's predictions\n",
"function get_prediction(network){\n",
" const last_layer = network[network.length-1];\n",
" return last_layer.map(n => n.output);\n",
"}\n",
"\n",
"\n",
"// takes the true result of the data that the network was activated on\n",
"// and runs back_propigation to find gradients for each neuron \n",
"function back_propigate(expected, network){\n",
"// console.log('backward propigation')\n",
"\n",
" // iterate backwards through layers...\n",
" // unpacks array before reversing as to not mutate original\n",
" return [...network].reverse().reduce((new_network, layer, i) => {\n",
" // for each layer iterate through the neurons\n",
" const current_layer = layer.map((neuron, j) => {\n",
" // check if we're in our first iteration (aka last layer)\n",
" const first_iteration = i === 0;\n",
" \n",
" // if we're at the last layer (first iteration)\n",
" // we can easily calculate the error by just doing expected - seen\n",
" // Otherwise, we're in an intermediate/hidden layer\n",
" // and we need to sum connections to the layer above multiplied \n",
" // by their weights to accumulate current neuron's\n",
" // errors. We just need the lastest layer in the accumulated network\n",
" const error = first_iteration ?\n",
" expected[j] - neuron.output :\n",
" new_network[0].reduce( (total_error, child_neuron) => \n",
" total_error + (child_neuron.weights[j]*child_neuron.delta), \n",
" 0);\n",
" \n",
" // send error backwards through deriv of activation function\n",
" const delta = error * activation_func_deriv(neuron.output, neuron.act_func)\n",
" \n",
" // return neuron so we build up a layer in map.\n",
" return Object.assign({}, neuron, {error, delta});\n",
" }); // end layer loop\n",
" \n",
" // shove our newest layer into the cummulative results\n",
" return [current_layer, ...new_network]\n",
" }, []); // end network reduce\n",
"}\n",
"\n",
"// takes a network with calculated gradients and updates\n",
"// the weights accordingbased upon our learning rate.\n",
"function update_weights(data_inputs, learn_rate, network){\n",
"// console.log('Updating weights')\n",
"\n",
" return network.map((layer, i) => {\n",
" \n",
" const firstLayer = i === 0;\n",
" const inputs = firstLayer ?\n",
" data_inputs : \n",
" network[i - 1].map(neuron => neuron.output);\n",
" \n",
" const inputs_w_bias = [...inputs, 1];\n",
" \n",
" const new_layer = layer.map(neuron => {\n",
" const old_weights = neuron.weights\n",
" const new_weights = old_weights.map((weight, j) => \n",
" weight + (learn_rate * neuron.delta * inputs_w_bias[j])\n",
" );\n",
" return Object.assign({}, neuron, {weights: new_weights});\n",
" })\n",
" \n",
" return new_layer;\n",
" })\n",
"}\n",
"\n",
"function train_network({\n",
" network, \n",
" train_data,\n",
" learn_rate, \n",
" n_epochs = 25,\n",
" print_progress = true,\n",
" }){\n",
" \n",
" const n_outputs = train_data[0].expected.length;\n",
" \n",
" const train_errors = sd.emptyArr(n_epochs).reduce((errors, _, epoch) => {\n",
" let sum_error = 0;\n",
" \n",
" train_data.forEach(({obs, expected}) => { \n",
" const forward_step = forward_prop(obs, network);\n",
" const back_step = back_propigate(expected, forward_step);\n",
" const update_step = update_weights(obs, learn_rate, back_step)\n",
" \n",
" // check performance of network\n",
" const predictions = get_prediction(forward_step);\n",
" const squared_differences = sd.vsub(expected, predictions)\n",
" .map(diff => Math.pow(diff, 2));\n",
" \n",
" sum_error += sd.sum(squared_differences);\n",
" \n",
" // update network\n",
" network = update_step; \n",
" })\n",
" \n",
" if(print_progress){\n",
" console.log(`-------------------\n",
"Epoch: ${epoch + 1} | Error: ${sum_error.toPrecision(4)}`);\n",
" }\n",
" \n",
" return [...errors, sum_error]\n",
" }, [])\n",
" \n",
" return {network, train_errors};\n",
"}"
]
},
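{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, here is the math that `back_propagate` and `update_weights` above are implementing. This is the standard squared-error backprop recursion; the symbols below are my own shorthand and don't appear in the code.\n",
"\n",
"- Output layer: $\\delta_j = (y_j - \\hat{y}_j)\\, f'_j$\n",
"- Hidden layer: $\\delta_j = \\left(\\sum_k w_{kj}\\, \\delta_k\\right) f'_j$, summing over the neurons $k$ in the layer above\n",
"- Weight update: $w_{ij} \\leftarrow w_{ij} + \\eta\\, \\delta_j\\, x_i$, where $\\eta$ is `learn_rate` and $x_i$ is the neuron's $i$-th input (with a constant $1$ appended for the bias)\n",
"\n",
"Here $f'_j$ is the derivative of the neuron's activation function, which the code computes from the neuron's output (that shortcut works for both ReLU and the sigmoid). Because the error is written as expected minus observed, the update adds the term rather than subtracting a gradient; the two conventions are equivalent."
]
},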
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"const start_net = initialize_network({\n",
" n_inputs: 2, \n",
" n_hidden: 2, \n",
" n_outputs: 2 })\n",
"const forward_step = forward_prop([1, 0], start_net)\n",
"const back_step = back_propigate([0,1], forward_step)\n",
"const update_step = update_weights([1, 0], 0.5, back_step)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an eyeball check, let's just peek at the first neuron in our network through these steps to make sure it's changing in the way we would expect."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ weights: [ 0.8159776571365353, -1.6183992978473956, 0.7201714463230573 ],\n",
" activation: null,\n",
" output: null,\n",
" error: null,\n",
" delta: null,\n",
" act_func: 'relu',\n",
" label: 'hidden' }"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"start_net[0][0]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ weights: [ 0.8159776571365353, -1.6183992978473956, 0.7201714463230573 ],\n",
" activation: 1.5361491034595924,\n",
" output: 1.5361491034595924,\n",
" error: null,\n",
" delta: null,\n",
" act_func: 'relu',\n",
" label: 'hidden' }"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"forward_step[0][0]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ weights: [ 0.8159776571365353, -1.6183992978473956, 0.7201714463230573 ],\n",
" activation: 1.5361491034595924,\n",
" output: 1.5361491034595924,\n",
" error: -0.037318925456606075,\n",
" delta: -0.037318925456606075,\n",
" act_func: 'relu',\n",
" label: 'hidden' }"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"back_step[0][0]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ weights: [ 0.7973181944082323, -1.6183992978473956, 0.7015119835947543 ],\n",
" activation: 1.5361491034595924,\n",
" output: 1.5361491034595924,\n",
" error: -0.037318925456606075,\n",
" delta: -0.037318925456606075,\n",
" act_func: 'relu',\n",
" label: 'hidden' }"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"update_step[0][0]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Real data:\n",
"\n",
"Here's some real (but actually fake) data to test a classification network."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"const myData = [\n",
" {obs: [2.7810836,2.550537003], expected: [1,0]},\n",
" {obs: [1.465489372,2.362125076],expected: [1,0]},\n",
" {obs: [3.396561688,4.400293529],expected: [1,0]},\n",
" {obs: [1.38807019,1.850220317], expected: [1,0]},\n",
" {obs: [3.06407232,3.005305973], expected: [1,0]},\n",
" {obs: [7.627531214,2.759262235],expected: [0,1]},\n",
" {obs: [5.332441248,2.088626775],expected: [0,1]},\n",
" {obs: [6.922596716,1.77106367], expected: [0,1]},\n",
" {obs: [8.675418651,-0.242068655],expected: [0,1]},\n",
" {obs: [7.673756466,3.508563011], expected: [0,1]},\n",
"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"First we initialize our network. "
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"const my_net = initialize_network({\n",
" n_inputs: myData[0].obs.length, \n",
" n_hidden: 2, \n",
" n_outputs: myData[0].expected.length \n",
"});"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next we can train it and save the resultant network and error history."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------\n",
"Epoch: 1 | Error: 8.819\n",
"-------------------\n",
"Epoch: 2 | Error: 10.63\n",
"-------------------\n",
"Epoch: 3 | Error: 7.998\n",
"-------------------\n",
"Epoch: 4 | Error: 7.967\n",
"-------------------\n",
"Epoch: 5 | Error: 7.881\n",
"-------------------\n",
"Epoch: 6 | Error: 7.834\n",
"-------------------\n",
"Epoch: 7 | Error: 6.746\n",
"-------------------\n",
"Epoch: 8 | Error: 4.219\n",
"-------------------\n",
"Epoch: 9 | Error: 2.937\n",
"-------------------\n",
"Epoch: 10 | Error: 2.437\n",
"-------------------\n",
"Epoch: 11 | Error: 1.964\n",
"-------------------\n",
"Epoch: 12 | Error: 1.025\n",
"-------------------\n",
"Epoch: 13 | Error: 1.076\n",
"-------------------\n",
"Epoch: 14 | Error: 0.6741\n",
"-------------------\n",
"Epoch: 15 | Error: 0.6883\n",
"-------------------\n",
"Epoch: 16 | Error: 0.5130\n",
"-------------------\n",
"Epoch: 17 | Error: 0.5016\n",
"-------------------\n",
"Epoch: 18 | Error: 0.7433\n",
"-------------------\n",
"Epoch: 19 | Error: 0.3623\n",
"-------------------\n",
"Epoch: 20 | Error: 0.3344\n",
"-------------------\n",
"Epoch: 21 | Error: 0.3687\n",
"-------------------\n",
"Epoch: 22 | Error: 0.2871\n",
"-------------------\n",
"Epoch: 23 | Error: 0.2585\n",
"-------------------\n",
"Epoch: 24 | Error: 0.2364\n",
"-------------------\n",
"Epoch: 25 | Error: 0.2180\n"
]
}
],
"source": [
"const train_obj = train_network({\n",
" network: my_net, \n",
" train_data: myData,\n",
" learn_rate: 0.5, \n",
" n_epochs: 25,\n",
" print_progress: true,\n",
"});"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ 8.81905083327959,\n",
" 10.628140253952441,\n",
" 7.998361150139645,\n",
" 7.9672646810366485,\n",
" 7.880625952274563,\n",
" 7.833915843413737,\n",
" 6.745751273117733,\n",
" 4.219248641225225,\n",
" 2.9367793950436294,\n",
" 2.437230023417222,\n",
" 1.9635573189225937,\n",
" 1.0247667057582754,\n",
" 1.0764923407382245,\n",
" 0.6741275124569228,\n",
" 0.688279346437304,\n",
" 0.5129869590997902,\n",
" 0.5015848601289554,\n",
" 0.7433364513548087,\n",
" 0.36225998711848406,\n",
" 0.3344326845337795,\n",
" 0.3687317143810583,\n",
" 0.2870664468367708,\n",
" 0.2585036620289994,\n",
" 0.23640032315989706,\n",
" 0.21802548541577452 ]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_obj.train_errors"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Looks like it's actually working!"
]
},
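{
"cell_type": "markdown",
"metadata": {},
"source": [
"As one last sanity check, we can push the training data back through the trained network and compare the predicted class (the output neuron with the larger value) to the expected one. The cell below is a small sketch added for this write-up; it only uses `forward_prop`, `get_prediction`, `myData`, and `train_obj` from above, so it should drop straight in."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"// For each observation, forward propagate through the trained network and\n",
"// take the index of the largest output as the predicted class.\n",
"myData.map(({obs, expected}) => {\n",
"  const outputs = get_prediction(forward_prop(obs, train_obj.network));\n",
"  const predicted_class = outputs.indexOf(Math.max(...outputs));\n",
"  const expected_class = expected.indexOf(Math.max(...expected));\n",
"  return {predicted_class, expected_class, correct: predicted_class === expected_class};\n",
"});"
]
},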
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Javascript (Node.js)",
"language": "javascript",
"name": "javascript"
},
"language_info": {
"file_extension": ".js",
"mimetype": "application/javascript",
"name": "javascript",
"version": "7.10.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}