Just Enough Math - example code + data + results
class Monoid (object):
    def __init__ (self, null, lift, op):
        self.null = null
        self.lift = lift
        self.op = op

    def fold (self, xs):
        if hasattr(xs, "__fold__"):
            return xs.__fold__(self)
        else:
            return reduce(self.op, (self.lift(x) for x in xs), self.null)

    def __call__ (self, *args):
        return self.fold(args)

    def star (self):
        return Monoid(self.null, self.fold, self.op)
summ = Monoid(0, lambda x: x, lambda a,b: a+b)
joinm = Monoid('', lambda x: str(x), lambda a,b: a+b)
listm = Monoid([], lambda x: [x], lambda a,b: a+b)
tuplem = Monoid((), lambda x: (x,), lambda a,b: a+b)
lenm = Monoid(0, lambda x: 1, lambda a,b: a+b)
prodm = Monoid(1, lambda x: x, lambda a,b: a*b)
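## a quick usage sketch (not part of the original gist): each monoid can be
## applied either through `fold` on an iterable, or called directly on args
print summ.fold([1, 2, 3])     # expect 6
print summ(1, 2, 3)            # expect 6, via __call__
print joinm.fold([1, 2, 3])    # expect '123'
print listm.fold("abc")        # expect ['a', 'b', 'c']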
## extended to define a monoid for folding Python `dict`

def dict_op (a, b):
    # copy first, so that folding never mutates the monoid's null or the inputs
    a = dict(a)

    for key, val in b.items():
        if key not in a:
            a[key] = val
        else:
            a[key] += val

    return a

dictm = Monoid({}, lambda x: x, lambda a,b: dict_op(a, b))
if __name__=='__main__':
    x1 = { "a": 2, "b": 3 }
    x2 = { "b": 2, "c": 7 }

    print x1, x2
    print dictm.fold([x1, x2])
summ.star().fold([[10, 20, 30], [40, 50]])
== summ.fold([10, 20, 30]) + summ.fold([40, 50])
== (10 + 20 + 30) + (40 + 50)
== 10 + 20 + 30 + 40 + 50
>>> listm.star().fold([[1, 2, 3], [4, 5, 6]])
[1, 2, 3, 4, 5, 6]
>>> summ.star().fold([[10, 20, 30], [40, 50]])
150
>>> print dictm.fold([x1, x2])
{'a': 2, 'c': 7, 'b': 5}
## based on "Cookie" problem
## http://www.greenteapress.com/thinkbayes/
from thinkbayes import Pmf
pmf = Pmf()
pmf.Set("H1", 1 / 2.0)
pmf.Set("H2", 1 / 2.0)
pmf.Mult("H1", 30 / 40.0)
pmf.Mult("H2", 20 / 40.0)
pmf.Normalize()
for bowl in ["H1", "H2"]:
    print bowl, pmf.Prob(bowl)
>>> for bowl in ["H1", "H2"]:
... print bowl, pmf.Prob(bowl)
...
H1 0.6
H2 0.4
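## cross-checking the Pmf result by applying Bayes' theorem directly
## (a hedged sketch, reusing the same priors and likelihoods as above)
prior = { "H1": 1 / 2.0, "H2": 1 / 2.0 }
likelihood = { "H1": 30 / 40.0, "H2": 20 / 40.0 }

unnorm = dict((h, prior[h] * likelihood[h]) for h in prior)
total = sum(unnorm.values())

for bowl in ["H1", "H2"]:
    print bowl, unnorm[bowl] / total   # expect H1 0.6, H2 0.4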
## use NumPy to calculate vector operations
import numpy as np
a = 3
b = 2
x = np.array([ 2, 3, 1, 0 ])
y = np.array([ 5, 1, 5, 0 ])
print np.add(x, a)
print np.dot(b, x)
print np.add(x, y)
print np.dot(x, y)
>>> import numpy as np
>>>
>>> a = 3
>>> b = 2
>>> x = np.array([ 2, 3, 1, 0 ])
>>> y = np.array([ 5, 1, 5, 0 ])
>>>
>>> print np.add(x, a)
[5 6 4 3]
>>>
>>> print np.dot(b, x)
[4 6 2 0]
>>>
>>> print np.add(x, y)
[7 4 6 0]
>>>
>>> print np.dot(x, y)
18
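## a hedged sanity check (not in the original): the dot product is just the
## sum of elementwise products
print sum(xi * yi for xi, yi in zip(x, y))   # 2*5 + 3*1 + 1*5 + 0*0 = 18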
import numpy as np
A = np.array([1, 2, 3, 4]).reshape(2, 2)
B = np.array([5, 6, 7, 8]).reshape(2, 2)
x = [2, 3]
print A
print B
print np.add(A, B)
print np.dot(A, x)
>>> import numpy as np
>>>
>>> A = np.array([1, 2, 3, 4]).reshape(2, 2)
>>> B = np.array([5, 6, 7, 8]).reshape(2, 2)
>>> x = [2, 3]
>>>
>>> print A
[[1 2]
[3 4]]
>>> print B
[[5 6]
[7 8]]
>>>
>>> print np.add(A, B)
[[ 6 8]
[10 12]]
>>> print np.dot(A, x)
[ 8 18]
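## a hedged sanity check (not in the original): each entry of np.dot(A, x)
## is one row of A dotted with x
print [ 1*2 + 2*3, 3*2 + 4*3 ]   # [8, 18], matching np.dot(A, x) above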
import numpy as np
A = np.array([1, 2, 3, 4]).reshape(2, 2)
I = np.array([1, 0, 0, 1]).reshape(2, 2)
print I
print np.dot(A, I)
>>> import numpy as np
>>> A = np.array([1, 2, 3, 4]).reshape(2, 2)
>>> I = np.array([1, 0, 0, 1]).reshape(2, 2)
>>>
>>> print I
[[1 0]
[0 1]]
>>> print np.dot(A, I)
[[1 2]
[3 4]]
>>>
>>> import numpy as np
>>> A = np.array([1, 2, 3, 4]).reshape(2, 2)
>>>
>>> print np.linalg.det(A)
-2.0
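## a hedged sanity check (not in the original): for a 2x2 matrix
## [[a, b], [c, d]], the determinant is a*d - b*c
print 1 * 4 - 2 * 3   # -2, matching np.linalg.det(A) above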
>>> import numpy as np
>>>
>>> A = np.array([3, 9, 4, 8]).reshape(2, 2)
>>> y = [ 5, 12 ]
>>> invA = np.linalg.inv(A)
>>>
>>> print np.linalg.det(A)
-12.0
>>> print invA
[[-0.66666667 0.75 ]
[ 0.33333333 -0.25 ]]
>>> print np.dot(invA, y)
[ 5.66666667 -1.33333333]
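## a hedged aside (not in the original): np.linalg.solve(A, y) solves the
## system A.x = y directly, without forming the inverse explicitly
print np.linalg.solve(A, y)   # expect the same result as np.dot(invA, y)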
>>> import numpy as np
>>>
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1.0, 0.2, 0.9, 2.1])
>>> A = np.vstack([x, np.ones(len(x))]).T
>>>
>>> (m, b), resid, rank, singular = np.linalg.lstsq(A, y)
>>>
>>> print A
[[ 0. 1.]
[ 1. 1.]
[ 2. 1.]
[ 3. 1.]]
>>> print (m, b), resid
(1.0, -0.95) [ 0.05]
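## a hedged cross-check (not in the original): a degree-1 polynomial fit
## should recover the same slope and intercept
print np.polyfit(x, y, 1)   # expect approximately [ 1.   -0.95 ]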
>>> import numpy as np
>>>
>>> A = np.array([0, 1, -2, -3]).reshape(2, 2)
>>>
>>> print np.linalg.eig(A)
(array([-1., -2.]), array([[ 0.70710678, -0.4472136 ],
[-0.70710678, 0.89442719]]))
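## a hedged sanity check (not in the original): each eigenpair should
## satisfy A.v = lambda.v
w, v = np.linalg.eig(A)

for i in range(len(w)):
    print np.allclose(np.dot(A, v[:, i]), w[i] * v[:, i])   # expect True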
## @JustGlowing 2011-07-22
## http://glowingpython.blogspot.com/2011/07/principal-component-analysis-with-numpy.html
import numpy as np
def princomp (A):
    """
    principal components analysis on the N x P data matrix A
    rows correspond to observations, columns to variables
    """
    # subtract the mean (along columns)
    M = (A - np.mean(A.T, axis=1)).T

    # NB: results are not always sorted
    [latent, coeff] = np.linalg.eig(np.cov(M))

    # projection of the data in the new space
    score = np.dot(coeff.T, M)

    return coeff, score, latent
## analyze each judge's scores for our bartendrs
A = np.array([
[ 5.1, 4.9, 4.7, 4.6, 5.7, 5.7, 6.2, 5.1, 5.7, 6.5, 7.7, 7.7, 6.0, 6.9, 5.6 ],
[ 3.5, 3.0, 3.2, 3.1, 3.0, 2.9, 2.9, 2.5, 2.8, 3.0, 3.8, 2.6, 2.2, 3.2, 2.8 ],
[ 1.4, 1.4, 1.3, 1.5, 4.2, 4.2, 4.3, 3.0, 4.1, 5.5, 6.7, 6.9, 5.0, 5.7, 4.9 ],
[ 0.2, 0.2, 0.2, 0.2, 1.2, 1.3, 1.3, 1.1, 1.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2.0 ],
])
coeff, score, latent = princomp(A.T)
print coeff
print score
print latent
portion_variance = map(lambda x: x / sum(latent), latent)
print portion_variance
>>> A = np.array([
... [ 5.1, 4.9, 4.7, 4.6, 5.7, 5.7, 6.2, 5.1, 5.7, 6.5, 7.7, 7.7, 6.0, 6.9, 5.6 ],
... [ 3.5, 3.0, 3.2, 3.1, 3.0, 2.9, 2.9, 2.5, 2.8, 3.0, 3.8, 2.6, 2.2, 3.2, 2.8 ],
... [ 1.4, 1.4, 1.3, 1.5, 4.2, 4.2, 4.3, 3.0, 4.1, 5.5, 6.7, 6.9, 5.0, 5.7, 4.9 ],
... [ 0.2, 0.2, 0.2, 0.2, 1.2, 1.3, 1.3, 1.1, 1.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2.0 ],
... ])
>>>
>>> coeff, score, latent = princomp(A.T)
>>>
>>> print coeff
[[ 0.41517419 0.57141489 0.62117063 0.33950326]
[-0.01541612 0.77708865 -0.59860493 -0.19382391]
[ 0.84516352 -0.19642892 -0.12061365 -0.48225165]
[ 0.33629059 -0.1761645 -0.49119399 0.78396631]]
>>> print score
[[ -2.89330144e+00 -2.96862822e+00 -3.13926264e+00 -3.01020574e+00
6.62595725e-02 1.01430244e-01 3.93533691e-01 -1.22296216e+00
1.84555042e-02 1.69888585e+00 3.33347445e+00 3.55463555e+00
9.80162718e-01 2.19905031e+00 8.88472315e-01]
[ 6.73661053e-01 1.70833750e-01 2.31611394e-01 5.74752546e-02
-9.81998222e-02 -1.93525137e-01 7.25394171e-02 -5.76261923e-01
-2.51591109e-01 -2.12420839e-03 9.99064073e-01 9.65546015e-03
-7.58438762e-01 2.54491445e-01 -5.89190886e-01]
[ 4.19865554e-02 2.17054897e-01 -1.48388497e-02 -4.12181496e-02
-1.14920820e-01 -1.04179726e-01 1.94344222e-01 5.53505357e-03
-3.22578670e-02 -6.94984616e-02 -1.44191636e-01 5.00892154e-01
3.06465195e-01 -2.10470923e-01 -5.34701644e-01]
[ 4.96901878e-02 7.87014923e-02 2.02612221e-02 -9.07570427e-02
-2.16034211e-01 -1.18255189e-01 3.27127732e-03 1.77481138e-01
-5.06476323e-02 -1.00978962e-01 -1.13749635e-01 1.00785362e-01
-1.09735529e-01 2.91590384e-01 7.83771368e-02]]
>>> print latent
[ 5.07038591 0.21236083 0.05816678 0.01775315]
>>> portion_variance = map(lambda x: x / sum(latent), latent)
>>> print portion_variance
[0.94620289391157297, 0.039629415391115773, 0.010854711199668675, 0.0033129794976426439]
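## a hedged cross-check (not in the original): PCA via SVD of the centered
## data matrix should yield the same proportions of explained variance
M = A.T - np.mean(A.T, axis=0)            # center each variable (column)
U, s, Vt = np.linalg.svd(M, full_matrices=False)
var_svd = (s ** 2) / (M.shape[0] - 1)     # eigenvalues of the covariance matrix

print var_svd / var_svd.sum()             # expect approx [ 0.946  0.040  0.011  0.003 ]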
>>> import numpy as np
>>>
>>> A = np.array([0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1]).reshape(4, 4)
>>>
>>> print A
[[0 1 0 1]
[1 0 1 1]
[0 1 0 1]
[1 1 0 1]]
>>> print np.linalg.eig(A)
(array([ 2.65109341, -1.37720285, -0.27389055, 0. ]),
array([[ -4.25648128e-01, -4.56381963e-01, -2.61003301e-01,
4.53246652e-17],
[ -5.42229129e-01, 7.53529250e-01, -6.20457793e-01,
-5.77350269e-01],
[ -4.25648128e-01, -4.56381963e-01, -2.61003301e-01,
-5.77350269e-01],
[ -5.86203816e-01, -1.24998709e-01, 6.91944132e-01,
5.77350269e-01]]))
from glpk import LPX
# set up to maximize the objective function
lp = LPX()
lp.name = 'example'
lp.obj.maximize = True
# append 3 rows, named p, q, r
row_names = ["p", "q", "r"]
lp.rows.add(len(row_names))
for r in lp.rows:
    r.name = row_names[r.index]
lp.rows[0].bounds = None, 100.0
lp.rows[1].bounds = None, 600.0
lp.rows[2].bounds = None, 150.0
# append 3 cols, named x0, x1, x2
lp.cols.add(3)
for c in lp.cols:
    c.name = 'x%d' % c.index
    c.bounds = 0.0, None
# set the objective coefficients and
# non-zero entries of the constraint matrix
lp.obj[:] = [ 5.0, 3.0, 2.0 ]
lp.matrix = [ 1.0, 1.0, 3.0, 10.0, 4.0, 5.0, 1.0, 1.0, 3.0 ]
# report the objective function value and structural variables
lp.simplex(msg_lev=LPX.MSG_ALL)
print 'Z = %g;' % lp.obj.value,
print '; '.join('%s = %g' % (c.name, c.primal) for c in lp.cols)
$ python ch5_lp.py
* 0: obj = 0.000000000e+00 infeas = 0.000e+00 (0)
* 2: obj = 3.666666667e+02 infeas = 0.000e+00 (0)
OPTIMAL SOLUTION FOUND
Z = 366.667; x0 = 33.3333; x1 = 66.6667; x2 = 0
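## a hedged alternative (not in the original): the same LP expressed with
## SciPy's linprog, which minimizes, so the objective is negated to maximize
from scipy.optimize import linprog

c = [ -5.0, -3.0, -2.0 ]
A_ub = [ [1.0, 1.0, 3.0], [10.0, 4.0, 5.0], [1.0, 1.0, 3.0] ]
b_ub = [ 100.0, 600.0, 150.0 ]

res = linprog(c, A_ub=A_ub, b_ub=b_ub)
print -res.fun, res.x   # expect Z = 366.667 with x approx [ 33.33, 66.67, 0 ]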
## based on an analytic solution, we expect that the local minimum of
## the polynomial function f(x) occurs at x = 9/4 = 2.25
def f (x):
    return x**4 - 3 * x**3 + 2

def f_prime (x):
    """first derivative of f(x)"""
    return 4 * x**3 - 9 * x**2

def g (x):
    return x**2

def g_prime (x):
    """first derivative of g(x)"""
    return 2 * x
## inputs: f, starting value x1, termination tolerances
def gradient_descent (x1, toler, step_size, max_iter, func, func_deriv):
    x0 = 0
    print "\t".join([ "i", "x1", "error", "func(x1)" ])

    for i in xrange(max_iter):
        error = abs(x1 - x0)
        print "\t".join([ "%d %0.4e %0.4e %3.4e" % (i, x1, error, func(x1)) ])

        if abs(x1 - x0) <= toler:
            return "local minimum:", x1

        x0 = x1
        x1 = x0 - step_size * func_deriv(x0)

    return "halted"
## NB: step size is set here, and is not adaptive
## (see the backtracking sketch after the trace below)
x1 = 6
toler = 0.00001
step_size = 0.01
max_iter = 93
print gradient_descent(x1, toler, step_size, max_iter, f, f_prime)
>>> x1 = 6
>>> toler = 0.00001
>>> step_size = 0.01
>>> max_iter = 93
>>> print gradient_descent(x1, toler, step_size, max_iter, f, f_prime)
i x1 error func(x1)
0 6.0000e+00 6.0000e+00 6.5000e+02
1 6.0000e-01 5.4000e+00 1.4816e+00
2 6.2376e-01 2.3760e-02 1.4233e+00
3 6.4907e-01 2.5309e-02 1.3571e+00
4 6.7605e-01 2.6978e-02 1.2819e+00
5 7.0482e-01 2.8774e-02 1.1964e+00
6 7.3553e-01 3.0704e-02 1.0989e+00
7 7.6830e-01 3.2773e-02 9.8789e-01
8 8.0328e-01 3.4985e-02 8.6137e-01
9 8.4062e-01 3.7341e-02 7.1727e-01
10 8.8046e-01 3.9837e-02 5.5332e-01
11 9.2293e-01 4.2467e-02 3.6711e-01
12 9.6815e-01 4.5216e-02 1.5620e-01
13 1.0162e+00 4.8060e-02 -8.1809e-02
14 1.0672e+00 5.0964e-02 -3.4906e-01
15 1.1211e+00 5.3883e-02 -6.4723e-01
16 1.1778e+00 5.6753e-02 -9.7725e-01
17 1.2373e+00 5.9495e-02 -1.3389e+00
18 1.2993e+00 6.2014e-02 -1.7305e+00
19 1.3635e+00 6.4199e-02 -2.1485e+00
20 1.4294e+00 6.5925e-02 -2.5872e+00
21 1.4965e+00 6.7066e-02 -3.0389e+00
22 1.5640e+00 6.7499e-02 -3.4937e+00
23 1.6311e+00 6.7121e-02 -3.9405e+00
24 1.6970e+00 6.5862e-02 -4.3677e+00
25 1.7607e+00 6.3702e-02 -4.7644e+00
26 1.8214e+00 6.0675e-02 -5.1215e+00
27 1.8782e+00 5.6878e-02 -5.4328e+00
28 1.9307e+00 5.2460e-02 -5.6956e+00
29 1.9783e+00 4.7609e-02 -5.9105e+00
30 2.0208e+00 4.2533e-02 -6.0807e+00
31 2.0583e+00 3.7433e-02 -6.2117e+00
32 2.0908e+00 3.2490e-02 -6.3098e+00
33 2.1186e+00 2.7843e-02 -6.3815e+00
34 2.1422e+00 2.3590e-02 -6.4327e+00
35 2.1620e+00 1.9788e-02 -6.4686e+00
36 2.1784e+00 1.6456e-02 -6.4933e+00
37 2.1920e+00 1.3584e-02 -6.5101e+00
38 2.2032e+00 1.1143e-02 -6.5214e+00
39 2.2123e+00 9.0928e-03 -6.5289e+00
40 2.2196e+00 7.3880e-03 -6.5338e+00
41 2.2256e+00 5.9814e-03 -6.5370e+00
42 2.2305e+00 4.8286e-03 -6.5391e+00
43 2.2343e+00 3.8887e-03 -6.5405e+00
44 2.2375e+00 3.1257e-03 -6.5414e+00
45 2.2400e+00 2.5085e-03 -6.5420e+00
46 2.2420e+00 2.0107e-03 -6.5423e+00
47 2.2436e+00 1.6100e-03 -6.5426e+00
48 2.2449e+00 1.2882e-03 -6.5427e+00
49 2.2459e+00 1.0300e-03 -6.5428e+00
50 2.2467e+00 8.2311e-04 -6.5429e+00
51 2.2474e+00 6.5751e-04 -6.5429e+00
52 2.2479e+00 5.2506e-04 -6.5429e+00
53 2.2483e+00 4.1918e-04 -6.5429e+00
54 2.2487e+00 3.3457e-04 -6.5430e+00
55 2.2489e+00 2.6700e-04 -6.5430e+00
56 2.2492e+00 2.1305e-04 -6.5430e+00
57 2.2493e+00 1.6998e-04 -6.5430e+00
58 2.2495e+00 1.3560e-04 -6.5430e+00
59 2.2496e+00 1.0817e-04 -6.5430e+00
60 2.2497e+00 8.6287e-05 -6.5430e+00
61 2.2497e+00 6.8826e-05 -6.5430e+00
62 2.2498e+00 5.4896e-05 -6.5430e+00
63 2.2498e+00 4.3785e-05 -6.5430e+00
64 2.2499e+00 3.4921e-05 -6.5430e+00
65 2.2499e+00 2.7852e-05 -6.5430e+00
66 2.2499e+00 2.2213e-05 -6.5430e+00
67 2.2499e+00 1.7716e-05 -6.5430e+00
68 2.2499e+00 1.4129e-05 -6.5430e+00
69 2.2500e+00 1.1268e-05 -6.5430e+00
70 2.2500e+00 8.9864e-06 -6.5430e+00
('local minimum:', 2.2499646074278457)
>>>
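## the NB above points out that the step size is fixed; a hedged sketch
## (not in the original) of a simple backtracking line search that shrinks
## the step until the move actually decreases func
def backtracking_step (x, func, func_deriv, step_size=1.0, beta=0.5):
    d = func_deriv(x)

    while func(x - step_size * d) > func(x):
        step_size *= beta

    return x - step_size * d

## e.g., replace the fixed update in `gradient_descent` with:
##     x1 = backtracking_step(x0, func, func_deriv)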
/* foobartendr.io pricing.mod */
var x0 >= 0;
var x1 >= 0;
var x2 >= 0;
maximize
value: 5.0 * x0 + 3.0 * x1 + 2.0 * x2;
subject to
ingrednt: 1.0 * x0 + 1.0 * x1 + 3.0 * x2 <= 100;
bartendr: 10.0 * x0 + 4.0 * x1 + 5.0 * x2 <= 600;
delivery: 1.0 * x0 + 1.0 * x1 + 3.0 * x2 <= 150;
end;
$ glpsol --math pricing.mod
Reading model section from pricing.mod...
16 lines were read
Generating value...
Generating ingrednt...
Generating bartendr...
Generating delivery...
Model has been successfully generated
glp_simplex: original LP has 4 rows, 3 columns, 12 non-zeros
glp_simplex: presolved LP has 3 rows, 3 columns, 9 non-zeros
Scaling...
A: min|aij| = 1.000e+00 max|aij| = 1.000e+01 ratio = 1.000e+01
Problem data seem to be well scaled
Crashing...
Size of triangular part = 3
* 0: obj = 0.000000000e+00 infeas = 0.000e+00 (0)
* 2: obj = 3.666666667e+02 infeas = 0.000e+00 (0)
OPTIMAL SOLUTION FOUND
Time used: 0.0 secs
## http://pythonhosted.org//neurolab/lib.html
import numpy as np
import neurolab as nl
# create a neural network with 3 inputs, 4 neurons
# in the hidden layer, and 1 in the output layer
net = nl.net.newff([[0, 1],[0, 1],[0, 1]], [4, 1])
input = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]]
target = [[0], [0], [1], [1]]
# train
net.trainf = nl.train.train_gdx
error = net.train(input, target, show=25)
# test
print "expect 0", net.sim([[0, 1, 0]])
print "expect 1", net.sim([[1, 1, 0]])
# visualize
import pylab as pl
pl.plot(error)
pl.xlabel('Epoch number')
pl.ylabel('Train error')
pl.grid()
pl.show()
$ python ch5_nn.py
Epoch: 25; Error: 0.714363135207;
Epoch: 50; Error: 0.6290077045;
Epoch: 75; Error: 0.481297387105;
Epoch: 100; Error: 0.382371205263;
Epoch: 125; Error: 0.32463469683;
Epoch: 150; Error: 0.0345094306294;
The goal of learning is reached
expect 0 [[-0.01642426]]
expect 1 [[ 0.90354536]]
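## a hedged follow-up (not in the original): simulate all four training
## patterns at once; results vary run to run with the random initial weights
print net.sim(input)   # expect values near the targets [[0], [0], [1], [1]]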