@octaviordz · Forked from dlidstrom/output · Created August 13, 2023
Minimal neural network in F# with no dependencies
(*
This is a vanilla neural network implementation in about 60 lines of F# code.
It was implemented after reading Matt Mazur's step-by-step description of
backpropagation, found here:
https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/
- There are *no* dependencies.
- This code updates the biases too (which Matt Mazur's example doesn't).
This network can be used for simple image recognition (handwriting examples),
classification, prediction, and so on. Even though the implementation is
simple, the applications are numerous.
No serialization has been implemented. Instead, two constructors let you
construct a network either from scratch or from given weights. See the
sample below.
Possible improvements:
- Add L2 (or L1) regularization. I tried but am not sure how to do it
  (a hedged sketch follows this comment block).
- Add batch learning, i.e. present multiple inputs before updating the
  weights (sum the errors). See the note after the Train member.
Licensed under the MIT License given below.
Copyright 2023 Daniel Lidstrom
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the “Software”), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*)
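(*
   A minimal sketch of the L2 regularization idea from the improvements list
   above. This is an assumption based on standard weight decay, not something
   verified against this network: every weight update subtracts an extra
   lr * lambda * w term, where lambda is a small hypothetical constant
   (e.g. 0.001) that does not appear in the code below.

       // inside Train, for each weight v with gradient g:
       // v <- v - lr * (g + lambda * v)
       weightsHidden[i][j] <- v - lr * (dProd * out + lambda * v)
*)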
open System
type Neural(weightsInputs, weightsHidden, biasesInputs, biasesHidden) =
    let sigmoid f = 1.0 / (1.0 + Math.Exp(-f))
    let tuple2 a b = a, b
    // construct with random weights in [-0.5, 0.5) and zeroed biases;
    // weights are stored as one row per receiving neuron
    new(inputCount, hiddenCount, outputCount, ?seed) =
        let r = Random(defaultArg seed 0)
        let weightsInputsArray = Array.init (inputCount * hiddenCount) (fun _ -> r.NextDouble() - 0.5)
        let weightsHiddenArray = Array.init (hiddenCount * outputCount) (fun _ -> r.NextDouble() - 0.5)
        let biasesInputs = Array.zeroCreate hiddenCount
        let biasesHidden = Array.zeroCreate outputCount
        Neural(
            weightsInputsArray |> Array.chunkBySize inputCount,
            weightsHiddenArray |> Array.chunkBySize hiddenCount,
            biasesInputs,
            biasesHidden)
    member val WeightsInputs = weightsInputs
    member val WeightsHidden = weightsHidden
    member val BiasesInputs = biasesInputs
    member val BiasesHidden = biasesHidden
    member this.Predict (inputs: float array) =
        snd (this.Predict' inputs)
    // forward pass; also returns the hidden activations, which Train needs
    member private _.Predict' (inputs: float array) =
        let hps = weightsInputs |> Array.map (fun ws -> (ws, inputs) ||> Array.map2 (*) |> Array.sum)
        let hiddenNodes = (hps, biasesInputs) ||> Array.map2 (fun l r -> sigmoid (l + r))
        let ops = weightsHidden |> Array.map (fun ws -> (ws, hiddenNodes) ||> Array.map2 (*) |> Array.sum)
        let outputNodes = (ops, biasesHidden) ||> Array.map2 (fun l r -> sigmoid (l + r))
        hiddenNodes, outputNodes
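    // The forward pass above computes, for each layer:
    //   hidden[j] = sigmoid(sum_i weightsInputs[j][i] * inputs[i] + biasesInputs[j])
    //   output[k] = sigmoid(sum_j weightsHidden[k][j] * hidden[j] + biasesHidden[k])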
    // one step of backpropagation (gradient descent with learning rate lr)
    member this.Train (inputs: float array) (expected: float array) lr =
        let hiddens, outputs = this.Predict' inputs
        // output error and sigmoid derivative: d(out)/d(net) = out * (1 - out)
        let dOs = (outputs, expected) ||> Array.map2 (-)
        let dNetOs = outputs |> Array.map (fun w -> w * (1. - w))
        let dProds = (dOs, dNetOs) ||> Array.map2 (*)
        let gWOs = Array.allPairs dProds hiddens
        // error propagated back to each hidden node
        let errsH =
            [| 0..weightsHidden[0].Length - 1 |]
            |> Array.map (fun i ->
                (dProds, weightsHidden |> Array.map (fun v -> v[i]))
                ||> Array.map2 (*) |> Array.sum)
        let dNetH = hiddens |> Array.map (fun w -> w * (1. - w))
        let dIWs =
            Array.allPairs ((errsH, dNetH) ||> Array.map2 (*)) inputs
            |> Array.map (fun (l, r) -> l * r)
        for (i, u) in Array.mapi tuple2 weightsHidden do
            for (j, v) in Array.mapi tuple2 u do
                // index by row width (hidden count), matching Array.allPairs order
                let (dProd, out) = gWOs[i * weightsHidden[0].Length + j]
                weightsHidden[i][j] <- v - lr * dProd * out
        for (i, u) in Array.mapi tuple2 biasesHidden do
            biasesHidden[i] <- u - lr * dOs[i] * dNetOs[i]
        for (i, u) in Array.mapi tuple2 weightsInputs do
            for (j, v) in Array.mapi tuple2 u do
                let dIW = dIWs[i * weightsInputs[0].Length + j]
                weightsInputs[i][j] <- v - lr * dIW
        for (i, u) in Array.mapi tuple2 biasesInputs do
            biasesInputs[i] <- u - lr * dNetH[i] * errsH[i]
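    // Batch learning sketch, per the improvements list in the header comment.
    // This is an assumption, not implemented here: accumulate the per-sample
    // gradients (dProd * out, dIWs and the bias terms) over a mini-batch and
    // apply one update with the summed values, w <- w - lr * summed gradient,
    // instead of updating the weights inside every Train call.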
// 8< -snip- 8<
// rest is sample uses
let lr = 0.5 // learning rate
// example from Matt's blog
let n = Neural(
    [| [| 0.15; 0.2 |]
       [| 0.25; 0.3 |] |],
    [| [| 0.4; 0.45 |]
       [| 0.5; 0.55 |] |],
    [| 0.35; 0.35 |],
    [| 0.6; 0.6 |])
// same network topology with random initial weights
let n' = Neural(2, 4, 2)
let inputs = [| 0.05; 0.1 |]
let expected = [| 0.01; 0.99 |]
printfn "expected: %A" expected
printfn "prediction with given weights: %A" (n.Predict inputs)
n.Train inputs expected lr
printfn "prediction after one round of training: %A" (n.Predict inputs)
for _ = 1 to 4000 do
    n.Train inputs expected lr
    n'.Train inputs expected lr
printfn "prediction after training (with given weights): %A" (n.Predict inputs)
printfn "prediction after training (with random initial weights): %A" (n'.Predict inputs)
// Now for something else: this network can compute the logical functions
// AND, OR, NAND, NOR, XOR, XNOR.
// A single network can compute all of them. I found that it needs 14 units
// in the hidden layer in order to converge to a solution for all of them.
let trainingNet = Neural(2, 14, 6)
for _ = 1 to 4000 do
    trainingNet.Train [| 0; 0 |] [| 0; 0; 1; 1; 0; 1 |] lr
    trainingNet.Train [| 0; 1 |] [| 0; 1; 1; 0; 1; 0 |] lr
    trainingNet.Train [| 1; 0 |] [| 0; 1; 1; 0; 1; 0 |] lr
    trainingNet.Train [| 1; 1 |] [| 1; 1; 0; 0; 0; 1 |] lr
// xorNet shows how to construct an instance from given weights;
// use your preferred way of storing the weights (serialize to JSON,
// binary, whatever); a sketch follows below.
let xorNet = Neural(trainingNet.WeightsInputs, trainingNet.WeightsHidden, trainingNet.BiasesInputs, trainingNet.BiasesHidden)
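// Serialization sketch (an assumption, not part of the original gist): the
// raw weight arrays round-trip through System.Text.Json (available in the
// .NET shared framework), and the primary constructor restores the network
// from the deserialized arrays, e.g. Neural(wi, wh, bi, bh).
let savedWeights = Text.Json.JsonSerializer.Serialize(xorNet.WeightsInputs)
let restoredWeights = Text.Json.JsonSerializer.Deserialize<float[][]>(savedWeights)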
printfn " AND OR NAND NOR XOR XNOR"
let r = xorNet.Predict [| 0; 0 |] in printfn "0,0 = %.2f %.2f %.2f %.2f %.2f %.2f" r[0] r[1] r[2] r[3] r[4] r[5]
let r = xorNet.Predict [| 0; 1 |] in printfn "0,1 = %.2f %.2f %.2f %.2f %.2f %.2f" r[0] r[1] r[2] r[3] r[4] r[5]
let r = xorNet.Predict [| 1; 0 |] in printfn "1,0 = %.2f %.2f %.2f %.2f %.2f %.2f" r[0] r[1] r[2] r[3] r[4] r[5]
let r = xorNet.Predict [| 1; 1 |] in printfn "1,1 = %.2f %.2f %.2f %.2f %.2f %.2f" r[0] r[1] r[2] r[3] r[4] r[5]
printfn "%A" {|
WeightsInputs = xorNet.WeightsInputs
WeightsHidden = xorNet.WeightsHidden
BiasesInputs = xorNet.BiasesInputs
BiasesHidden = xorNet.BiasesHidden
|}
$ dotnet fsi .\Network.fsx
expected: [|0.01; 0.99|]
prediction with given weights: [|0.7513650696; 0.7729284653|]
prediction after one round of training: [|0.7284417622; 0.7783769203|]
prediction after training (with given weights): [|0.01504883933; 0.9850741484|]
prediction after training (with random initial weights): [|0.01438626937; 0.9829258874|]
      AND  OR   NAND NOR  XOR  XNOR
0,0 = 0.00 0.01 1.00 0.99 0.00 0.99
0,1 = 0.01 0.98 0.97 0.02 0.98 0.03
1,0 = 0.01 1.00 0.97 0.01 0.99 0.02
1,1 = 0.98 1.00 0.05 0.01 0.02 0.98
{ BiasesHidden =
[|0.4088152155; 4.588121765; -3.45690469; -7.271609535; -7.182252718;
-2.808535179|]
BiasesInputs =
[|1.106119474; 1.675248795; -2.496470733; -1.783199984; -0.6159974799;
-2.284892253; 3.749079962; 6.212742521; -2.933751427; 1.051539122;
3.470553729; -1.864585698; 4.29595167; -1.182060735|]
WeightsHidden =
[|[|-1.493953155; -3.786827905; 1.069006447; -1.375610638; -1.584239552;
1.973354027; -3.279882628; -2.715952912; 2.386170812; -1.777113403;
-0.893173322; 0.2143214478; -3.219967493; 2.628085537|];
[|-3.803622371; -3.205465017; 3.262874469; -1.363352316; -0.8991548274;
-0.28592493; -3.578271666; 2.133427764; 0.5086740192; -2.323094977;
1.384471029; -0.5532097273; -2.307180395; 4.86128788|];
[|-2.899673522; 2.647510515; 0.9568199473; -2.362072798; 1.389506882;
-0.8227709962; -1.606731393; 4.879536197; -0.01181068123; -1.757517586;
7.376238861; -3.471640661; 2.198072651; 0.284814227|];
[|-1.502121134; 4.46446204; 0.5538146718; -1.876370702; 8.015558607;
-4.014820558; 2.1003523; 0.784077254; -2.726398121; 10.31415106;
4.167370043; 10.57362169; -11.45752472; 10.10782708|];
[|2.042294916; 0.5340243035; -2.018428467; 10.21817848; 4.771876582;
10.8613178; -11.06077956; 10.07732319; 1.982621462; -6.440088493;
11.30149933; 12.78642473; -12.53927742; 2.331915639|];
[|-11.81144836; 9.850845234; 2.084458922; -6.410436281; 11.59930468;
13.42733968; -13.13854545; 3.038105345; 5.840453029; 3.553931683;
14.09342416; -6.925709809; -2.349479701; -1.30415306|]|]
WeightsInputs =
[|[|-3.777894901; 1.631708753|]; [|-3.422196591; -5.198842115|];
[|4.606059791; 0.2426064609|]; [|4.341552354; -6.387727071|];
[|-2.668876148; -3.32569373|]; [|0.8889484527; 3.058268682|];
[|-3.791588176; 0.295461549|]; [|-4.501523269; -4.080296483|];
[|1.73341427; 2.869784449|]; [|-4.456161441; -5.027174121|];
[|-3.678682625; -2.36937638|]; [|0.4783670907; -3.282672747|];
[|-1.297169933; -3.647393036|]; [|4.424418655; 5.389696942|]|] }