@hfm, forked from showyou/em.py, created November 5, 2012 15:34
EM Algorithm
import numpy as np
import numpy.random as rand
import matplotlib.pyplot as plt


def mixture_gaussian():
    # Draw one sample from a two-component mixture:
    # N(-5, 1^2) with probability 0.3, N(5, 4^2) otherwise.
    pi_0 = 0.3
    if rand.random() < pi_0:
        return rand.normal(-5, 1)
    else:
        return rand.normal(5, 4)


N = 1000
x = [mixture_gaussian() for _ in range(N)]

# n, bins, patches = plt.hist(x, 100, density=True, facecolor='green', alpha=0.75)
# plt.show()
def dnorm(x, m, s):
    # Gaussian density with mean m and variance s (note: s is a variance,
    # not a standard deviation).
    return np.exp(-((x - m) ** 2) / (2 * s)) / np.sqrt(2 * np.pi * s)


def log_likelihood(x, m, s, pi):
    # Log-likelihood of the data under the current two-component mixture.
    return sum(np.log(pi[0] * dnorm(x[i], m[0], s[0]) + pi[1] * dnorm(x[i], m[1], s[1]))
               for i in range(len(x)))
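
# Quick sanity check (added here for illustration, not part of the original
# gist): because s is a variance, the density at the mean should equal
# 1 / sqrt(2 * pi * s).
assert np.isclose(dnorm(0.0, 0.0, 4.0), 1.0 / np.sqrt(2 * np.pi * 4.0))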
# Initial guesses for the means, variances, and mixing weights.
mu = [-1.0, -1.0]
sigma = [1.0, 2.0]
pi = [0.5, 0.5]
gamma_0 = []
gamma_1 = []
n_k = [0, 0]

new_log_likelihood = log_likelihood(x, mu, sigma, pi)
for step in range(1000):
    old_log_likelihood = new_log_likelihood

    # E-step: responsibility of component 0 for each data point.
    gamma_0 = [pi[0] * dnorm(x[j], mu[0], sigma[0]) /
               sum(pi[i] * dnorm(x[j], mu[i], sigma[i]) for i in range(2))
               for j in range(len(x))]
    gamma_1 = [1 - g for g in gamma_0]
    # print(gamma_0)

    # M-step: re-estimate the means, variances, and mixing weights from the
    # responsibility-weighted data.
    n_k[0] = sum(gamma_0)
    n_k[1] = sum(gamma_1)
    lenx = len(x)
    mu[0] = sum(gamma_0[i] * x[i] / n_k[0] for i in range(lenx))
    mu[1] = sum(gamma_1[i] * x[i] / n_k[1] for i in range(lenx))
    sigma[0] = sum((gamma_0[i] * (x[i] - mu[0]) ** 2) / n_k[0] for i in range(lenx))
    sigma[1] = sum((gamma_1[i] * (x[i] - mu[1]) ** 2) / n_k[1] for i in range(lenx))
    pi[0] = n_k[0] / N
    pi[1] = 1 - pi[0]

    # Stop when the log-likelihood has effectively converged.
    new_log_likelihood = log_likelihood(x, mu, sigma, pi)
    if abs(new_log_likelihood - old_log_likelihood) < 0.01:
        break

# Fitted means, variances, and mixing weights.
print(mu)
print(sigma)
print(pi)
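
# Optional illustration (added, not part of the original gist): overlay the
# fitted mixture density on a histogram of the data. Assumes an interactive
# matplotlib backend is available for plt.show().
xs = np.linspace(min(x) - 1, max(x) + 1, 500)
density = pi[0] * dnorm(xs, mu[0], sigma[0]) + pi[1] * dnorm(xs, mu[1], sigma[1])
plt.hist(x, bins=100, density=True, facecolor='green', alpha=0.75)
plt.plot(xs, density, 'r-', linewidth=2)
plt.show()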