Create a gist now

Instantly share code, notes, and snippets.

What would you like to do?
Eigenvectors in Idris

This program explores Idris, a general-purpose functional programming language with dependent types. This program is explained in detail on my blog.

To run this program yourself, you will need a Haskell compiler installed (GHC), and you will probably want cabal. The best way to experiment with this program is to create a cabal sandbox, and install Idris:

cabal sandbox init
cabal install idris

and then run the tests (executed from the same directory):

.cabal-sandbox/bin/idris --testpkg eigenvector.ipkg

or run the program:

.cabal-sandbox/bin/idris --build eigenvector.ipkg

If you add .cabal-sandbox/bin to your $PATH, then you may just type idris --build eigenvector.ipkg, etc.

Enjoy!

module Eigenvector
import Data.Matrix
import Data.Matrix.Algebraic
import Effects
import Effect.Random
import Effect.System
import Debug.Error
-- Note that Debug.Trace.trace is a good tool for quick & dirty debugging.
||| The Euclidean (L2) norm of a vector: the square root of the
||| vector's dot product with itself.
norm : Vect n Double -> Double
norm x = sqrt $ x <:> x
||| Scale v to have unit length (norm 1).
normalize : Vect n Double -> Vect n Double
normalize v = scale <#> v
  where
    -- Reciprocal of the vector's length; <#> is scalar multiplication.
    scale : Double
    scale = 1 / norm v
||| Orthogonalize v to w: remove from v its component along w.
||| NOTE(review): the projection coefficient is not divided by
||| (w <:> w), so this is exact only when w has unit length — which
||| holds for the normalized eigenvectors it is used with here.
orthogonalize : Vect n Double -> Vect n Double -> Vect n Double
orthogonalize w v = v <-> (coefficient <#> w)
  where
    -- How much of v points along w.
    coefficient : Double
    coefficient = v <:> w
||| Calculate the eigenvalue corresponding to a given eigenvector,
||| as v . (M v).  NOTE(review): this equals the eigenvalue only when
||| v has unit length — the callers here always pass normalized
||| vectors; confirm before reusing elsewhere.
eigenvalue : Matrix n n Double -> Vect n Double -> Double
eigenvalue m v = v <:> (m </> v)
||| Estimate a single eigenvector to the expected precision, given
||| a list of previously calculated eigenvectors.
|||
||| This is power iteration with deflation: the seed is first
||| orthogonalized against every previously found eigenvector, then
||| multiplied by the matrix and normalized.  The function recurses
||| with the new estimate until successive iterates are within
||| `precision` of each other.
eigenvector : Matrix n n Double
-> Double                -- desired precision (bound on `err` below)
-> Vect n Double         -- seed / current estimate of the eigenvector
-> List (Vect n Double)  -- previously found eigenvectors to deflate against
-> Vect n Double
eigenvector {n} matrix precision seed previousEigenvectors =
  if err < precision
    then result
    else eigenvector matrix precision result previousEigenvectors where
      -- Orthogonalize the vector to each previously computed eigenvector.
      tmp : Vect n Double
      tmp = foldr orthogonalize seed previousEigenvectors
      -- </> is matrix multiplication by a row vector.
      result : Vect n Double
      result = normalize $ matrix </> tmp
      -- Convergence measure: distance between the previous iterate
      -- (tmp) and the new one (result).
      err : Double
      err = case compare (eigenvalue matrix result) 0 of
        GT => norm (tmp <-> result)
        EQ => error "Error margin is undefined when the eigenvalue is 0."
        -- For a negative eigenvalue the iterate flips sign each step,
        -- so convergence is measured against the negated iterate
        -- (tmp + result instead of tmp - result).
        LT => norm (tmp <+> result)
||| Naively generate a (pseudo)-random float between 0 and 1.
||| NOTE(review): a bound of 0 divides by zero (yielding NaN); the
||| caller in this program always passes a positive bound.
rndDouble : Integer -> Eff Double [RND]
rndDouble bound = do
  numerator <- rndInt 0 bound
  return (fromInteger numerator / fromInteger bound)
||| map using a function which depends on the previously-mapped values;
||| like a combination of map and fold in which the state is the values
||| which have already been mapped.  The `state` accumulator is built
||| newest-first and reversed once the input is exhausted.
mapRemember : (a -> List b -> b) -> List a -> List b -> List b
mapRemember f [] state = reverse state
mapRemember f (x :: xs) state = mapRemember f xs (f x state :: state)
||| Calculate the eigenvectors of a matrix using the power method.
|||
||| Seeds the RNG from the system clock, builds n pseudo-random seed
||| vectors, then finds each eigenvector in turn, deflating against
||| the ones already computed (threaded through `mapRemember`).
eigenvectors : Matrix n n Double
-> Double
-> Eff (List (Vect n Double)) [RND, SYSTEM]
eigenvectors {n} matrix precision = do
  -- !time reads the clock via the SYSTEM effect to seed the RNG.
  srand !time
  -- The functions given as arguments to higher-order effectful functions
  -- (mapE, mapVE) must be syntactically applied directly to their arguments.
  -- Each seed vector holds random values bounded by cast (1 / precision).
  seedVectors <- mapE (\vs => mapVE (\x => rndDouble x) vs)
    $ List.replicate n (Vect.replicate n (cast $ 1 / precision))
  return $ mapRemember (eigenvector matrix precision) seedVectors []
package eigenvector
opts = "-p effects -p contrib"
modules = Eigenvector
, Test
tests = Test.normalizeYieldsUnitVector
, Test.singleRealEigenvector
, Test.allRealEigenvectors
, Test.orthogonalizeYieldsOrthogonalVectors
module Test
import Data.Vect
import Data.Matrix
import Data.Matrix.Algebraic
import Effects
import Effect.Random
import Effect.System
import Eigenvector
import TestUtil
||| Assert that two vectors agree to within `precision`, measured by
||| the norm of their difference.
shouldBeWithin : Double
-> Vect n Double
-> Vect n Double -> IO ()
shouldBeWithin precision =
  should (\got, want => norm (got <-> want) < precision)
||| Assert that two scalar values differ by less than `precision` in
||| absolute value.
|||
||| Fixed: the original combined the two one-sided bounds with `||`,
||| which is a tautology — for any positive precision at least one of
||| (actual - expected) and (expected - actual) is <= 0, so the check
||| always succeeded.  Both bounds must hold, i.e.
||| |actual - expected| < precision, hence `&&`.
shouldBeCloseTo : (Neg a, Num a, Ord a, Show a) => a -> a -> a -> IO ()
shouldBeCloseTo precision = should $ (\actual, expected =>
  (actual - expected < precision) && (expected - actual < precision))
||| Test: a normalized vector has norm 1.
normalizeYieldsUnitVector : IO ()
normalizeYieldsUnitVector =
  describe "'normalize' should yield a unit vector" $
    shouldBeCloseTo 0.0001 (norm (normalize [1, 1])) 1
||| Test: `orthogonalize v w` removes from w its component along v,
||| so the result must be orthogonal to v (the FIRST argument).
|||
||| Fixed: the original asserted `result <:> w` ~ 0 (orthogonality to
||| the second argument, which is not what `orthogonalize` promises)
||| and used v, w that were already orthogonal, making the check
||| vacuous.  We now use non-orthogonal inputs (with v of unit length,
||| as `orthogonalize` assumes) and assert orthogonality to v.
orthogonalizeYieldsOrthogonalVectors : IO ()
orthogonalizeYieldsOrthogonalVectors = describe "'orthogonalize'"
  $ (shouldBeCloseTo 0.0001) (result <:> v) 0
  where
    v : Vect 2 Double
    v = [1, 0]
    w : Vect 2 Double
    w = [1, 1]
    result : Vect 2 Double
    result = orthogonalize v w
||| Test: power iteration on a known symmetric 3x3 matrix finds its
||| dominant eigenvector, compared against a reference value.
singleRealEigenvector : IO ()
singleRealEigenvector = describe "'eigenvectors' is correct for a real matrix"
  $ (shouldBeWithin precision) actual expected
  where
    matrix : Matrix 3 3 Double
    matrix =
      [ [1, 2, 3]
      , [2, 4, 1]
      , [3, 1, 5]
      ]
    precision : Double
    precision = 0.01
    -- Seed [1,1,1]; no previous eigenvectors to deflate against.
    actual : Vect 3 Double
    actual = eigenvector matrix precision [1,1,1] []
    -- From WolframAlpha; normalized because 'eigenvector' returns
    -- unit-length vectors.
    expected : Vect 3 Double
    expected = normalize [0.649679, 0.640561, 1]
||| Test: all three eigenvectors of a known symmetric matrix match
||| reference values to within 'precision'.
allRealEigenvectors : IO ()
allRealEigenvectors = describe "all real eigenvectors" $
  do
    actual <- run $ eigenvectors matrix precision
    traverse_ (uncurry $ shouldBeWithin precision) (zip actual expected)
  where
    matrix : Matrix 3 3 Double
    matrix =
      [ [1, 2, 3]
      , [2, 4, 1]
      , [3, 1, 5]
      ]
    precision : Double
    precision = 0.0001
    -- Expected eigenvectors copied from WolframAlpha.
    -- Normalizing loses no generality (eigenvectors scaled by a
    -- nonzero scalar remain eigenvectors) and is necessary because
    -- 'eigenvector' returns unit-length vectors, so the comparison
    -- must be between vectors of the same scale.  The `map negate`
    -- presumably flips the reference vectors to the sign the power
    -- iteration converges to — TODO confirm.
    expected : List (Vect 3 Double)
    expected = map normalize
      [ [0.649679, 0.640561, 1]
      , map negate [-0.0277091, -1.53303, 1]
      , map negate [-2.22197, 0.692466, 1]
      ]
module TestUtil
||| Print a label for a test case, then run the test on the same line.
describe : String -> IO () -> IO ()
describe label test = do
  putStr (label ++ ": ")
  test
||| Check a binary predicate against an actual and an expected value,
||| printing "Succeeded." on success or a diagnostic on failure.
should : Show a => (a -> a -> Bool) -> a -> a -> IO ()
should check actual expected =
  if check actual expected
    then putStrLn "Succeeded."
    else do
      putStrLn " Failed."
      putStrLn (" Expected value: " ++ show expected ++ ".")
      putStrLn (" Actual value: " ++ show actual ++ ".")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment