valueIterationAgents.py
# valueIterationAgents.py
# -----------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

import mdp, util

from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
    """
    * Please read learningAgents.py before reading this. *

    A ValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs value iteration
    for a given number of iterations using the supplied
    discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
        Your value iteration agent should take an mdp on
        construction, run the indicated number of iterations
        and then act according to the resulting policy.

        Some useful mdp methods you will use:
            mdp.getStates()
            mdp.getPossibleActions(state)
            mdp.getTransitionStatesAndProbs(state, action)
            mdp.getReward(state, action, nextState)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # A Counter is a dict with default 0

        for iteration in range(iterations):
            # Batch value iteration: compute every state's new value from
            # the previous sweep's values, then swap them in all at once.
            # Updating self.values in place mid-sweep would mix values from
            # two different iterations.
            newValues = util.Counter()
            for state in mdp.getStates():
                actions = mdp.getPossibleActions(state)
                if actions:
                    # V(s) is the best Q-value over the legal actions;
                    # terminal states keep the default value of 0.
                    newValues[state] = max(self.getQValue(state, action)
                                           for action in actions)
            self.values = newValues
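
        # Worked one-sweep check (illustrative numbers, not project code):
        # with states A -> B, one action, T(A, a, B) = 1.0, R(A, a, B) = 1,
        # discount = 0.9, and V_0 = 0 everywhere, the first sweep gives
        # V_1(A) = 1.0 * (1 + 0.9 * 0) = 1.0.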

    def getValue(self, state):
        """
        Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def getQValue(self, state, action):
        """
        The q-value of the state action pair
        (after the indicated number of value iteration
        passes). Note that value iteration does not
        necessarily create this quantity and you may have
        to derive it on the fly.
        """
        # Q(s, a) is the expected one-step reward plus the discounted value
        # of the successor: sum over s' of
        # T(s, a, s') * [R(s, a, s') + discount * V(s')].
        qValue = 0
        for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            reward = self.mdp.getReward(state, action, nextState)
            qValue += prob * (reward + self.discount * self.values[nextState])
        return qValue

    def getPolicy(self, state):
        """
        The policy is the best action in the given state
        according to the values computed by value iteration.
        You may break ties any way you see fit. Note that if
        there are no legal actions, which is the case at the
        terminal state, you should return None.
        """
        actions = self.mdp.getPossibleActions(state)
        if not actions:
            # No legal actions, e.g. at the terminal state.
            return None
        # Return the action with the highest Q-value; max() breaks ties in
        # favor of the first such action it encounters.
        return max(actions, key=lambda a: self.getQValue(state, a))

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.getPolicy(state)
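
# Example usage (a minimal sketch; `someMdp` is a stand-in for any object
# implementing the mdp interface listed in __init__, e.g. one of the
# project's Gridworld MDPs):
#
#   agent = ValueIterationAgent(someMdp, discount=0.9, iterations=100)
#   state = someMdp.getStates()[0]
#   value = agent.getValue(state)    # V(state) after 100 sweeps
#   action = agent.getPolicy(state)  # best action, or None if terminal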