@craine
Created October 5, 2017 23:01
class AlphaBetaPlayer(IsolationPlayer):
    """Game-playing agent that chooses a move using iterative deepening minimax
    search with alpha-beta pruning. You must finish and test this player to
    make sure it returns a good move before the search time limit expires.
    """

    def get_move(self, game, time_left):
        """Search for the best move from the available legal moves and return a
        result before the time limit expires.

        Modify the get_move() method from the MinimaxPlayer class to implement
        iterative deepening search instead of fixed-depth search.

        **********************************************************************
        NOTE: If time_left() < 0 when this function returns, the agent will
              forfeit the game due to timeout. You must return _before_ the
              timer reaches 0.
        **********************************************************************

        Parameters
        ----------
        game : `isolation.Board`
            An instance of `isolation.Board` encoding the current state of the
            game (e.g., player locations and blocked cells).

        time_left : callable
            A function that returns the number of milliseconds left in the
            current turn. Returning with any less than 0 ms remaining forfeits
            the game.

        Returns
        -------
        (int, int)
            Board coordinates corresponding to a legal move; may return
            (-1, -1) if there are no available legal moves.
        """
        self.time_left = time_left

        best_move = (-1, -1)
        try:
            for depth in range(1, 200):
                best_move = self.alphabeta(game, depth)
        except SearchTimeout:
            pass

        # Return the best move from the last completed search iteration
        return best_move
    def terminal_test(self, game):
        """ Return True if the game is over for the active player
        and False otherwise.
        """
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        return not bool(game.get_legal_moves())

    def max_value(self, game, depth, alpha=float("-inf"), beta=float("inf")):
        """ Return the value for a loss (-1) if the game is over,
        otherwise return the maximum value over all legal child
        nodes.
        """
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        if depth >= self.search_depth:
            return self.score(game, self)
        depth += 1

        if self.terminal_test(game):
            return self.score(game, self)

        v = float("-inf")
        for m in game.get_legal_moves():
            v = max(v, self.min_value(game.forecast_move(m), depth, alpha, beta))
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v
    def min_value(self, game, depth, alpha=float("-inf"), beta=float("inf")):
        """ Return the value for a win (+1) if the game is over,
        otherwise return the minimum value over all legal child
        nodes.
        """
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        if depth >= self.search_depth:
            return self.score(game, self)
        depth += 1

        if self.terminal_test(game):
            return self.score(game, self)

        v = float("inf")
        for m in game.get_legal_moves():
            v = min(v, self.max_value(game.forecast_move(m), depth, alpha, beta))
            if v >= alpha:
                return v
            beta = max(beta, v)
        return v
    def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf")):
        """Implement depth-limited minimax search with alpha-beta pruning as
        described in the lectures.

        This should be a modified version of ALPHA-BETA-SEARCH in the AIMA text
        https://github.com/aimacode/aima-pseudocode/blob/master/md/Alpha-Beta-Search.md

        **********************************************************************
        You MAY add additional methods to this class, or define helper
        functions to implement the required functionality.
        **********************************************************************

        Parameters
        ----------
        game : isolation.Board
            An instance of the Isolation game `Board` class representing the
            current game state

        depth : int
            Depth is an integer representing the maximum number of plies to
            search in the game tree before aborting

        alpha : float
            Alpha limits the lower bound of search on minimizing layers

        beta : float
            Beta limits the upper bound of search on maximizing layers

        Returns
        -------
        (int, int)
            The board coordinates of the best move found in the current search;
            (-1, -1) if there are no legal moves

        Notes
        -----
        (1) You MUST use the `self.score()` method for board evaluation
            to pass the project tests; you cannot call any other evaluation
            function directly.

        (2) If you use any helper functions (e.g., as shown in the AIMA
            pseudocode) then you must copy the timer check into the top of
            each helper function or else your agent will timeout during
            testing.
        """
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        # if depth == 0:
        #     # Heuristic score from point of view of maximizing player
        #     return self.score(game, self), (-1, -1)
        # depth = 1
        best_score = alpha
        # best_move = None
        best_move = (-1, -1)
        for m in game.get_legal_moves():
            v = self.min_value(game.forecast_move(m), depth - 1, alpha, beta)
            if v > best_score:
                best_score = v
                best_move = m
            alpha = max(alpha, v)
        return best_move
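
# A minimal sketch of how the time_left contract described in the get_move()
# docstring can be exercised outside the project's test harness. Only the
# "callable returning milliseconds remaining" contract comes from the
# docstring above; the make_time_left helper, the 150 ms budget, and the
# commented-out driver lines are illustrative assumptions, not part of the
# isolation API.
import time

def make_time_left(budget_ms=150.0):
    """Return a callable reporting how many milliseconds remain out of an
    initial budget, mirroring the time_left contract in get_move()."""
    start = time.monotonic()
    return lambda: budget_ms - (time.monotonic() - start) * 1000.0

# Hypothetical driver (assumes an isolation.Board instance named `game`):
# player = AlphaBetaPlayer()
# move = player.get_move(game, make_time_left())  # must return before 0 ms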
craine commented Oct 5, 2017

Failed Test: 7. Test functionality of AlphaBetaPlayer.alphabeta()

AssertionError: False is not true : Your AlphaBetaAgent.alphabeta function did not call the heuristic evaluation function in all of the expected set of leaf nodes configurations in the game tree as player 1. Make sure that you are using the self.score() method to evaluate the board (not calling one of your heuristic functions directly) and verify your stopping conditions. Leaf nodes are shown as (player_1, player_2) location pairs. Optional nodes may or may not be visited depending on your termination test.

Expected leaf nodes:
{((5, 7), (3, 3)), ((4, 4), (3, 3)), ((8, 6), (3, 3)), ((5, 3), (3, 3)), ((8, 4), (3, 3)), ((7, 7), (3, 3)), ((7, 3), (3, 3))}
Optional leaf nodes:
set()
Leaf nodes your agent evaluated:
{((7, 7), (5, 4)), ((8, 4), (1, 4)), ((7, 7), (2, 5)), ((5, 7), (1, 2)), ((8, 4), (5, 4)), ((7, 7), (1, 2)), ((8, 4), (5, 2)), ((7, 7), (1, 4)), ((7, 7), (5, 2)), ((8, 6), (1, 4)), ((7, 3), (1, 2)), ((8, 4), (1, 2)), ((8, 4), (4, 5)), ((8, 4), (2, 1)), ((4, 4), (1, 2)), ((8, 6), (2, 5)), ((7, 7), (4, 5)), ((8, 6), (5, 4)), ((5, 3), (1, 2)), ((8, 6), (5, 2)), ((7, 7), (2, 1)), ((8, 4), (2, 5)), ((8, 6), (1, 2)), ((8, 6), (2, 1)), ((8, 6), (4, 5))}
Skipped nodes:
{((4, 4), (3, 3)), ((5, 7), (3, 3)), ((8, 6), (3, 3)), ((5, 3), (3, 3)), ((8, 4), (3, 3)), ((7, 7), (3, 3)), ((7, 3), (3, 3))}
Extra nodes:
{((7, 7), (5, 4)), ((8, 4), (1, 4)), ((7, 7), (2, 5)), ((5, 7), (1, 2)), ((8, 4), (5, 4)), ((7, 7), (1, 2)), ((8, 4), (5, 2)), ((7, 7), (1, 4)), ((7, 7), (5, 2)), ((8, 6), (1, 4)), ((7, 3), (1, 2)), ((8, 4), (1, 2)), ((8, 4), (4, 5)), ((8, 4), (2, 1)), ((4, 4), (1, 2)), ((8, 6), (2, 5)), ((7, 7), (4, 5)), ((8, 6), (5, 4)), ((5, 3), (1, 2)), ((8, 6), (5, 2)), ((7, 7), (2, 1)), ((8, 4), (2, 5)), ((8, 6), (1, 2)), ((8, 6), (2, 1)), ((8, 6), (4, 5))}

Test Case Details:

Heuristic: open_move_score
Depth limit: 1
Initial Board State:
0 1 2 3 4 5 6 7 8
0 | | | | | | | | | |
1 | | | | | | | | | |
2 | | | | | | | | | |
3 | | | | 2 | - | - | | | |
4 | | - | - | - | | | - | | |
5 | | | | | | - | - | | |
6 | | | - | - | | 1 | | | |
7 | | | | | | | | | |
8 | | | | | | | | | |

game._board_state:
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 51]
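
One likely cause, given the "verify your stopping conditions" hint and the expected leaf set (every expected leaf keeps player 2 at (3, 3), i.e. exactly one ply deep), is that the helpers cut off against self.search_depth instead of counting the depth argument passed down from alphabeta() toward zero, so a depth-1 call keeps recursing. Below is a minimal sketch of AIMA-style helpers with the depth counted down, and with the min-layer pruning comparisons flipped (cutoff on v <= alpha, beta tightened with min). It reuses self.score(), self.time_left, self.TIMER_THRESHOLD, and game.forecast_move() from the code above; the rest is an illustration of the standard pattern, not a verified passing solution.

    def max_value(self, game, depth, alpha, beta):
        # Timer check copied into every helper (see Note 2 in the docstring).
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()
        legal_moves = game.get_legal_moves()
        # Stop on the depth argument (not self.search_depth) or at a dead end,
        # so that alphabeta(game, 1) evaluates exactly the depth-1 leaves.
        if depth == 0 or not legal_moves:
            return self.score(game, self)
        v = float("-inf")
        for m in legal_moves:
            v = max(v, self.min_value(game.forecast_move(m), depth - 1, alpha, beta))
            if v >= beta:               # beta cutoff
                return v
            alpha = max(alpha, v)
        return v

    def min_value(self, game, depth, alpha, beta):
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()
        legal_moves = game.get_legal_moves()
        if depth == 0 or not legal_moves:
            return self.score(game, self)
        v = float("inf")
        for m in legal_moves:
            v = min(v, self.max_value(game.forecast_move(m), depth - 1, alpha, beta))
            if v <= alpha:              # alpha cutoff (<=, not >=)
                return v
            beta = min(beta, v)         # tighten beta downward
        return v

    def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf")):
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()
        best_move = (-1, -1)
        best_score = float("-inf")      # start below alpha so some legal move is kept
        for m in game.get_legal_moves():
            v = self.min_value(game.forecast_move(m), depth - 1, alpha, beta)
            if v > best_score:
                best_score, best_move = v, m
            alpha = max(alpha, v)
        return best_move

With helpers structured this way, get_move() would not need to change: iterative deepening still calls alphabeta() with increasing depth until SearchTimeout fires.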
