Skip to content

Instantly share code, notes, and snippets.

@nipunbatra
Created February 20, 2013 17:02
Show Gist options
  • Save nipunbatra/4997101 to your computer and use it in GitHub Desktop.
{
"metadata": {
"name": "HMM_Viterbi"
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "heading",
"level": 1,
"metadata": {},
"source": "Unfair Casino Problem HMM"
},
{
"cell_type": "code",
"collapsed": false,
"input": "\"\"\"\n========================================\nUnfair Casino Problem using Discrete HMM\n========================================\n\nThis script shows how to use Decoding in a Discrete(Multinomial) HMM, which means\ngiven the model parameters and an observed sequence we wish to find the most likely hidden state sequence\ngenerating the same.\nIt uses the model given in http://www.rose-hulman.edu/~shibberu/MA490/MA490HMM.html#Example_Dishonest_Casino_\nOne may also refer to a great lecture series at http://vimeo.com/7175217\n\n\"\"\"\nprint __doc__\nimport datetime\nimport numpy as np\nimport pylab as pl\nfrom sklearn.hmm import MultinomialHMM\nimport random\n\n###############################################################################\n################################################################################\n#Specifying HMM parameters\n\n#We can obtain the starting probability by finding the stationary distribution from the transition matrix\n#This denotes that the probabilities of starting in Fair state and Biased state are 2/3 and 1/3 respectively\nstart_prob = np.array([.67, 0.33])\n\n#The Transition Matrix\n#This means that given that current state is Fair, probability of remaining in fair state is .95 and transitioning to\n#Biased state is .05. Similarly, given that current state is Biased, probability of transitioning to fair state is .1\n#and remaining in the same state is .9\ntrans_mat = np.array([[0.95, 0.05],\n [.1, 0.9]])\n\n#The Emission Matrix\n#This means that if the dice is fair, all 6 outcomes (1 to 6) have same probability,\n#whereas, for the biased dice, 6 is observed with probability 0.5 and the remaining \n#with a probability 0.1 each \nemiss_prob=np.array([[1.0/6,1.0/6,1.0/6,1.0/6,1.0/6,1.0/6],[.1,.1,.1,.1,.1,.5]])\n\n#Initializing the model with 2 state(n_components) and specifying the transition matrix and number of iterations\nmodel = MultinomialHMM(n_components=2,transmat=trans_mat,n_iter=10)\n\n#Number of emitted symbols\nmodel.n_symbols=6\n#Assigning emission probability, start probability and random seed\nmodel.emissionprob_=emiss_prob\nmodel.startprob_=start_prob\nmodel.prng=np.random.RandomState(10)\n\n#dice_obs is a random sequence of observations. Note that some extra 6's have been added in middle of the sequence\n#which might correspond to use of Biased Dice \ndice_obs=[random.randint(1,6) for i in range(0,20)]+[6,6,6,6,6,6]+[random.randint(1,6) for i in range(0,20)]\nprint \"Dice Observations\\n\",dice_obs\n\n#Note that the emitted symbols must vary from 0 through 5. Thus we subtract 1 from each element of dice_obs\nobs=[x-1 for x in dice_obs]\n\n#Finding the likely state sequence and log probability of the path using Viterbi algorithm and parameters specified in the model\nlogprob, state_sequence = model.decode(obs)\nprint \"\\nExpected State Sequence- F for fair and B for Biased\"\nstate_sequence_symbols=['F' if x==0 else 'B' for x in state_sequence]\nprint state_sequence_symbols\n\nprint \"Log probability of this most likely path is: \",logprob\nprint \"Probability of this most likely path is : \",np.exp(logprob)\n\n",
"language": "python",
"metadata": {},
"outputs": [
{
"output_type": "stream",
"stream": "stdout",
"text": "\n========================================\nUnfair Casino Problem using Discrete HMM\n========================================\n\nThis script shows how to use Decoding in a Discrete(Multinomial) HMM, which means\ngiven the model parameters and an observed sequence we wish to find the most likely hidden state sequence\ngenerating the same.\nIt uses the model given in http://www.rose-hulman.edu/~shibberu/MA490/MA490HMM.html#Example_Dishonest_Casino_\nOne may also refer to a great lecture series at http://vimeo.com/7175217\n\n\nDice Observations\n"
},
{
"output_type": "stream",
"stream": "stdout",
"text": "[4, 3, 5, 5, 4, 1, 6, 6, 1, 5, 5, 6, 5, 2, 1, 5, 1, 1, 4, 5, 6, 6, 6, 6, 6, 6, 1, 1, 6, 1, 2, 3, 5, 5, 1, 3, 4, 2, 2, 2, 6, 2, 5, 6, 5, 4]\n\nExpected State Sequence- F for fair and B for Biased\n['F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'B', 'B', 'B', 'B', 'B', 'B', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F']\nLog probability of this most likely path is: -84.0040045506\nProbability of this most likely path is : "
},
{
"output_type": "stream",
"stream": "stdout",
"text": " 3.29248925166e-37\n"
}
],
"prompt_number": 1
},
{
"cell_type": "code",
"collapsed": false,
"input": "",
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment