Skip to content

Instantly share code, notes, and snippets.

@euhmeuh
Last active November 22, 2017 22:29
Show Gist options
  • Save euhmeuh/4c6e9ea94fb85ae4e36dde4cff9c02d8 to your computer and use it in GitHub Desktop.
Machine learning
#lang racket
(require math racket/trace)
;; Short alias for matrix-transpose, used throughout the backprop code.
(define T matrix-transpose)
;; Logistic sigmoid 1 / (1 + e^-x); squashes any real into (0, 1).
(define (sigmoid x)
  (/ 1.0 (+ 1.0 (exp (- x)))))
;; Derivative of the sigmoid expressed in terms of the sigmoid's OUTPUT:
;; s'(x) = y * (1 - y) where y = s(x).
;; Callers (matrix-delta via matrix-dsig) pass the cached layer outputs,
;; which are already sigmoid activations.  The previous version re-applied
;; sigmoid to them, computing s(y)*(1-s(y)) instead of y*(1-y), which
;; damps the gradient and slows training.
(define (sigmoid-deriv y)
  (* y (- 1 y)))
;; Apply the sigmoid activation to every entry of a matrix.
(define (matrix-sig m)
  (matrix-map (lambda (v) (sigmoid v)) m))
;; Apply the sigmoid derivative to every entry of a matrix.
(define (matrix-dsig m)
  (matrix-map (lambda (v) (sigmoid-deriv v)) m))
;; Element-wise (Hadamard) product of two same-shaped matrices.
(define (matrix-mul m n)
  (matrix-map (lambda (a b) (* a b)) m n))
;; Gradient signal for a layer: the incoming error scaled element-wise by
;; the activation's slope evaluated at the layer's cached outputs.
(define (matrix-delta outputs errors)
  (let ([slopes (matrix-dsig outputs)])
    (matrix-mul errors slopes)))
;; Build a rows x cols matrix of uniform random reals in [0, 1).
(define (make-random-matrix rows cols)
  (build-matrix rows cols
                (lambda (_row _col) (random))))
;; Build a rows x cols weight matrix with entries uniform in [-1, 1),
;; by stretching [0,1) randoms via v -> 2v - 1.
(define (make-weights rows cols)
  (matrix-map (lambda (v) (- (* 2 v) 1))
              (make-random-matrix rows cols)))
;; A fully-connected layer of `nb-of-neurons` sigmoid units, each reading
;; `inputs-per-neuron` values.  Returns a closure-object responding to:
;;   ('process inputs)      -> activation matrix (also cached internally)
;;   ('update inputs delta) -> adjust the weights by inputs^T * delta
;;   ('weights)             -> current weight matrix
;;   ('outputs)             -> activations cached by the last 'process
(define (make-layer nb-of-neurons inputs-per-neuron)
  (define weights (make-weights inputs-per-neuron nb-of-neurons))
  ;; Cached so backpropagation can compute the error delta later.
  (define outputs '())
  (define (process inputs)
    (set! outputs (matrix-sig (matrix* inputs weights)))
    outputs)
  (define (update inputs delta)
    (set! weights (matrix+ weights
                           (matrix* (T inputs) delta))))
  (lambda (msg . args)
    (case msg
      [(process) (apply process args)]
      [(update) (apply update args)]
      [(weights) weights]
      [(outputs) outputs])))
;; Feed `inputs` forward through every layer in order, returning the
;; final layer's activations (or the inputs unchanged if `layers` is empty).
(define (propagate inputs layers)
  (for/fold ([signal inputs]) ([layer (in-list layers)])
    (layer 'process signal)))
;; One backpropagation pass: given the inputs that produced the currently
;; cached activations and the expected outputs, walk the layers from last
;; to first, computing each layer's delta and updating its weights.
;; NOTE(review): assumes `propagate` has just run, so every layer's
;; 'outputs cache is populated — confirm callers always pair the two.
(define (backpropagate first-inputs expected-outputs layers)
;; `layers` here is the REVERSED list, so the layer nearer the input is
;; the cadr; its cached outputs were this layer's forward-pass inputs.
;; The very first layer fed on the raw network inputs.
(define (get-upper-inputs layers)
(if (and (pair? layers) (not (empty? (cdr layers))))
((cadr layers) 'outputs)
first-inputs))
;; Recurse over the reversed layer list.  The let* binds `errors` from the
;; PRE-update weights before `update` mutates them — order matters here.
(define (train layers previous-errors)
(unless (empty? layers)
(let* ([layer (car layers)]
[delta (matrix-delta (layer 'outputs) previous-errors)]
[errors (matrix* delta (T (layer 'weights)))]
[inputs (get-upper-inputs layers)])
(layer 'update inputs delta)
(train (cdr layers) errors))))
;; Output-layer error is the plain difference expected - actual.
(define first-errors (matrix- expected-outputs ((last layers) 'outputs)))
(train (reverse layers) first-errors))
;; Run `iterations` rounds of a forward pass followed by a backward pass
;; over the whole training set (full-batch gradient descent).
(define (train-all inputs expected-outputs iterations layers)
  (for ([_ (in-range iterations)])
    (propagate inputs layers)
    (backpropagate inputs expected-outputs layers)))
;; Print the matrix entries as a single flat row-major list.
(define (display-results matrix)
  (define entries (matrix->list matrix))
  (displayln entries))
;; Pretty-print a matrix: one line per row, each entry followed by a tab.
(define (display-matrix matrix)
  (for ([row (in-range (matrix-num-rows matrix))])
    (define cells
      (for/list ([col (in-range (matrix-num-cols matrix))])
        (format "~a\t" (matrix-ref matrix row col))))
    (displayln (apply string-append cells))))
;; let's try this!
;; Demo script: build a 3-input -> 4-hidden -> 1-output network and train
;; it on (most of) the rows of a 3-bit truth table.
(define layer1 (make-layer 4 3))
(displayln "layer 1: 4 neurons with 3 inputs:")
(display-matrix (layer1 'weights))
(define layer2 (make-layer 1 4))
(displayln "layer 2: 1 neuron with 4 inputs:")
(display-matrix (layer2 'weights))
(define network (list layer1 layer2))
;; rules for XOR
;; NOTE(review): the targets below are 1 whenever exactly one of the first
;; two bits is set — XOR of columns 1 and 2; the third column acts as a
;; bias-like input.  Confirm against the expected-results row.
(define inputs (matrix [[0 0 1] [0 1 1] [1 0 1] [0 1 0] [1 0 0] [1 1 1] [0 0 0]]))
(define results (T (matrix [[0 1 1 1 1 0 0]])))
;; training
(displayln "training...")
(train-all inputs results 3000 network)
(displayln "layer1 after training:")
(display-matrix (layer1 'weights))
(displayln "layer 2 after training:")
(display-matrix (layer2 'weights))
(displayln "results with same inputs as training:")
;; should be similar to the expected results
(display-matrix (propagate inputs network))
(displayln "results with new inputs:")
;; try new inputs not seen before
(display-matrix (propagate (matrix [[1 1 0] [1 0 1] [1 1 1] [1 0 1] [0 1 1] [0 0 0] [0 1 1] [1 0 1] [1 1 1]])
network))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment