;; Gist by @kballenegger, created March 9, 2012
(ns mltest.core
  (:require [clojure.math.numeric-tower :refer [expt abs round]]))
; training_set = [[3,2],
;                 [1,2],
;                 [0,1],
;                 [4,3]]
(def training-set
  [[3 2]
   [1 2]
   [0 1]
   [4 3]])
; def h x, theta0, theta1
;   theta0 + theta1 * x
; end
(defn h
  "hypothesis: theta0 + theta1 * x"
  [x theta0 theta1]
  (+ theta0 (* theta1 x)))
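;; quick REPL check, worked by hand: with theta0 = 1 and theta1 = 2,
;; h(3) = 1 + 2 * 3
(comment
  (h 3 1 2) ;=> 7
  )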
; def J theta0, theta1, training_set
;   sum = 0
;   training_set.each do |e|
;     sum += (h(e[0], theta0, theta1) - e[1]) ** 2
;   end
;   1.0 / (2 * training_set.count) * sum
; end
(defn J
  "cost function for the hypothesis above: (1 / 2n) * the sum of squared errors"
  [theta0 theta1 training-set]
  (let [cost #(expt (- (h (first %) theta0 theta1) (second %)) 2)
        n    (count training-set)]
    ;; divide by an integer (not 2.0) so integer and ratio inputs stay exact
    (/ (reduce + (map cost training-set))
       (* 2 n))))
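;; sanity check, worked by hand: with theta0 = 0, theta1 = 1 every point misses
;; by exactly 1, so J = (1 / (2 * 4)) * 4 = 1/2 (a ratio, since the inputs are
;; integers)
(comment
  (J 0 1 training-set) ;=> 1/2
  )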
; def derivative precision_magnitude, &f
;   raise "can only create derivatives of single-argument functions" unless f.arity == 1
;   dx = 10 ** (0 - precision_magnitude)
;   lambda { |x| (f.call(x + dx) - f.call(x)) / dx }
; end
(defn round-magnitude
  "rounds a number d to the magnitude m, i.e. to m decimal places"
  [d m]
  (let [scale (expt 10 m)]
    (/ (round (* d scale)) scale)))
(defn derivative
  "returns a derivative function for function f (assumes arity of 1), at a precision of magnitude m"
  [m f]
  (let [dx (expt 10 (- 0 m 1))]
    ;; forward difference, rounded so tiny numeric residue doesn't leak through
    #(let [d (/ (- (f (+ % dx)) (f %)) dx)]
       (round-magnitude d m))))
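;; e.g. the numeric derivative of x^2 at x = 3: with m = 4, dx is 10^-5, the
;; forward difference gives 6 + 10^-5, and round-magnitude snaps it back to 6
(comment
  ((derivative 4 #(* % %)) 3) ;=> 6
  )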
; def gradient_descent learning_rate, precision_magnitude, &f
;   thetas = []
;   (1..f.arity).each { thetas.push 0 }
;   good = false
;   until good
;     new_thetas = thetas
;     good = true
;     thetas.each_index do |j|
;       prime = (derivative(precision_magnitude) { |x| tmp_thetas = thetas; tmp_thetas[j] = x; f.call(tmp_thetas) }).call(thetas[j])
;       new_thetas[j] = thetas[j] - learning_rate * prime
;       good = false if prime.abs > (10 ** (0 - precision_magnitude))
;     end
;     thetas = new_thetas
;   end
;   thetas
; end
(defn gradient-descent
  "performs a gradient descent algorithm using learning rate a, precision of magnitude m, on function f (assuming arity of 2)"
  [a m f]
  (let [arity 2] ; assuming arity of 2, since the arity of f can't easily be inspected here
    (loop [thetas (vec (repeat arity 0))]
      (let [indexes (range 0 arity)
            ;; numeric partial derivative of f in coordinate j, the other thetas held fixed
            primes  (mapv (fn [j]
                            ((derivative m
                                         (fn [x]
                                           (apply f (map (fn [i] (if (= i j) x (nth thetas i)))
                                                         indexes))))
                             (nth thetas j)))
                          indexes)
            ;; converged once every partial derivative is within 10^-(m+1) of zero
            is-good (every? #(<= (abs %) (expt 10 (- 0 m 1))) primes)]
        (if is-good
          (map #(round-magnitude % m) thetas)
          (recur (mapv #(- (nth thetas %) (* a (nth primes %))) indexes)))))))
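;; A minimal sketch of gradient-descent on a function with a known minimum:
;; f(t0, t1) = (t0 - 1)^2 + (t1 - 2)^2 bottoms out at (1, 2). A ratio learning
;; rate keeps every theta rational, so the run stays exact and terminates.
(comment
  (gradient-descent 1/10 4 #(+ (expt (- %1 1) 2) (expt (- %2 2) 2))) ;=> (1 2)
  )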
; # use case
;
; thetas = gradient_descent 0.1, 10 do |theta0, theta1|
;   J theta0, theta1, training_set
; end
;
; thetas.map! { |e| e.round(6) }
; p thetas
;; the learning rate is the ratio 1/10 rather than the double 0.1 so the thetas
;; stay rational: with doubles, the forward difference at dx = 10^-11 is dominated
;; by floating-point noise and the 10^-11 convergence test may never pass
(def thetas (gradient-descent 1/10 10 #(J %1 %2 training-set)))
; solution = lambda { |x|
;   h x, thetas[0], thetas[1]
; }
(defn solution
  "solution!"
  [x]
  (apply h x thetas))
; puts solution.call(3)
(defn -main [& argv]
  (println (double (solution 3)))) ; coerce, since the thetas may be ratios
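;; Expected output, assuming the descent converges: the exact least-squares fit
;; for this training set is theta0 = 1.2, theta1 = 0.4 (from the normal
;; equations), so (solution 3) is 1.2 + 0.4 * 3 and -main should print 2.4.
(comment
  (-main) ; 2.4
  )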