Skip to content

Instantly share code, notes, and snippets.

@RuizSerra
Last active December 30, 2022 03:35
Show Gist options
  • Save RuizSerra/80d4175a33f3ba01ece7a98335786981 to your computer and use it in GitHub Desktop.
A MESA Python implementation of the Minority Game
"""
A Python MESA implementation of the Minority Game (Challet and Zhang, 1997)
Author: Jaime Ruiz Serra
Date: Dec 2022
"""
import mesa
import numpy as np
from scipy.stats import bernoulli
def bin2dec(x):
    """Convert a binary array to its decimal equivalent (e.g. [0, 1, 1] -> 3).

    The most significant bit comes first.  Works on batched input: the
    conversion is applied along the last axis, so an (N, M) array of N
    binary words yields an array of N integers.

    Args:
        x: array-like of 0/1 values; any leading batch dimensions allowed.

    Returns:
        Integer scalar (1-D input) or array of integers (batched input).
    """
    x = np.asarray(x)  # accept plain lists as well as ndarrays
    # Weights 2**(M-1), ..., 2**1, 2**0 for the last-axis positions.
    weights = 2 ** np.arange(x.shape[-1] - 1, -1, -1)
    return np.sum(weights * x, axis=-1)
class Agent(mesa.Agent):
    """Minority Game agent (Challet and Zhang, 1997).

    Each agent holds S random boolean strategies, each a lookup table over
    the 2**M possible histories of the last M game outcomes, and always
    plays the strategy with the highest virtual fitness.
    """

    def __init__(self, unique_id, model, M, S):
        """Initialise the agent.

        Args:
            unique_id: Agent identifier (passed through to mesa.Agent).
            model: The MinorityGameModel this agent belongs to.
            M: Memory length — number of past outcomes the agent observes.
            S: Number of strategies the agent draws at random.
        """
        super().__init__(unique_id, model)
        self.M = M
        self.S = S
        self.score = 0
        # Each row is one strategy: a random {0, 1} response for every one
        # of the 2**M possible histories.
        self._strategies = bernoulli.rvs(p=0.5, size=(S, 2**M))
        # Evaluate all S strategies at once for a given binary history x.
        self._run_strategies = lambda x: self._strategies[:, bin2dec(x)]
        # Virtual score per strategy: how often each would have won so far.
        self._strategies_fitness = np.zeros(S).astype(np.int32)
        self._current_choices = None
        self.action = None

    def _choose_action(self):
        """Get the outcomes for all strategies from given input and choose an action."""
        x = self.model.get_history(self.M)
        # Compute outputs for all strategies
        self._current_choices = self._run_strategies(x)
        # Chosen action comes from the currently fittest strategy
        self.action = self._current_choices[np.argmax(self._strategies_fitness)]

    def _update_scores(self):
        """Update the agent's score and strategies' fitness from the game outcome."""
        # BUG FIX: the original guard was `if self.action:`, which is falsy
        # when the chosen action is 0 — wins on side 0 were never scored and
        # strategy fitness was never updated in that case.  Only skip the
        # very first tick, before any action has been chosen.
        if self.action is not None:
            if self.action == self.model.outcome:
                self.score += 1
            # Reward every strategy that would have picked the minority side.
            self._strategies_fitness = (
                (self._current_choices == self.model.outcome).astype(np.int32)
                + self._strategies_fitness
            )

    def step(self):
        # Score the previous round first, then choose this round's action.
        self._update_scores()
        self._choose_action()
class MinorityGameModel(mesa.Model):
    """Minority Game model with N agents choosing one of two sides each tick.

    The side chosen by the minority of agents wins the tick; the winning
    side (0 or 1) is appended to a shared public history that agents use
    to pick their next action.
    """

    def __init__(self, N, M=5, S=4):
        """Build the model.

        Args:
            N: Number of agents.
            M: Memory length — an int applied to all agents, or a length-N
               array of per-agent values.
            S: Strategies per agent — an int or a length-N array, as for M.
        """
        super().__init__()  # FIX: initialise the mesa.Model base class
        # Simulation configuration
        self.schedule = mesa.time.RandomActivation(self)
        self.running = True
        # Agent configuration: broadcast scalar M / S to one value per agent.
        self.num_agents = N
        if isinstance(M, int):
            M = np.ones(N).astype(np.int32) * M
        if isinstance(S, int):
            S = np.ones(N).astype(np.int32) * S
        # Create agents
        for i in range(self.num_agents):
            a = Agent(i, self, M[i], S[i])
            self.schedule.add(a)
        # Synthetic random history to bootstrap the game, long enough for
        # the largest agent memory.
        self.history = bernoulli.rvs(p=0.5, size=M.max())
        self.attendance = None
        # FIX: define outcome up front instead of only inside step(), so the
        # attribute always exists before its first read.
        self.outcome = None
        # Data collection
        self.datacollector = mesa.DataCollector(
            model_reporters={
                "Attendance": self.compute_attendance,
            },
            agent_reporters={
                "Score": "score"
            }
        )

    def step(self):
        """Advance the game by one tick."""
        self.datacollector.collect(self)
        self.schedule.step()
        # Compute and store attendance and minority side (outcome): side 1
        # wins when no more than half the agents attended side A.
        self.attendance = self.compute_attendance()
        self.outcome = int(self.attendance <= int(self.num_agents / 2))
        self.history = np.append(self.history, self.outcome)

    def get_history(self, M):
        """Return the last M recorded outcomes."""
        return self.history[-M:]

    def compute_attendance(self):
        """Compute the attendance for side A in the current tick."""
        actions = [agent.action for agent in self.schedule.agents]
        # For the first iteration of the game we compute the attendance before
        # any actions were actually chosen (t=0), so we ignore the first value
        if actions[0] is None:
            return np.nan  # FIX: the np.NaN alias was removed in NumPy 2.0
        return np.sum(actions)
if __name__ == '__main__':
    # Demo: run a small batch of Minority Game simulations and print a
    # summary of the collected results.
    import pandas as pd

    params = {"N": 101, "M": 5, "S": 4}

    batch_results = mesa.batch_run(
        MinorityGameModel,
        parameters=params,
        max_steps=100,
        iterations=5,
        data_collection_period=1,
        number_processes=6,
        display_progress=True,
    )

    df = pd.DataFrame(batch_results)
    print()
    print(df.head())
    print(df.shape)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment