Code for Bear & Rand (2016) PNAS
% % % % % % % % % % % % % % % % % % %
% CODE FOR BEAR & RAND (2016) PNAS
%
% This MATLAB script runs the low mutation limit (steady state) calculations
% shown in Figures 2a and S1-S3, as well as the agent-based simulations
% for higher mutation rates shown in Figure S3, and plots the results.
%
% This script calls the following functions:
%
% calculate_payoff(strategy1,strategy2,b,c,d,p)
% - returns the payoff of strategy1 against strategy2 given
% the specified values of b, c, d, and p
%
% steady_state_calc(payoffTable,N,w,a)
% - returns the steady state distribution of the population in
% the limit of low mutation, where payoffTable is an MxM matrix
% with payoffTable(i,j) being the payoff of strategy i against
% strategy j; N is the population size; w is the selection
% strength; and a is the amount of assortment
%
% agent_based_sim(N,b,c,d,p,w,mu,nGens,a)
% - returns the average value of each strategy parameter for
% an agent based simulation using the specified parameter
% values, run with a mutation rate of mu for nGens
% generations.
% % % % % % % % % % % % % % % % % % %
%%%%%%%%%%%%%%%%%%%%%%%
%% DEFINE PARAMETERS %%
%%%%%%%%%%%%%%%%%%%%%%%
clear all
% Parameters used from main text, Figure 2
N = 50; b = 4; c = 1; d = 1; w = 6; a = 0;
% Agent sim parameters from Figure S3a in Supplementary Information
mu = .05; nGens = 1e7; % -> 10mil generations
% We loop through values of p (repeated game probability)
ps_steadyState = .01:.01:.99;
% Because the agent-based simulations take much longer, we use a coarser
% grid of p values
ps_agentSim = .1:.1:.9;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% STEADY STATE CALCULATION %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Generate Strategy Space %%%
% For the steady state calculation, we must generate a discrete strategy
% set. To do so:
% sR, s1, and sI are all restricted to being either 0 or 1;
% we discretize the deliberation threshold (T) in intervals of d/10
% generate all 8 possible combinations of the first three strategy
% parameters (sR,s1,sI), each set to 0 or 1
% (find/ind2sub enumerates every index of a 2x2x2 array; subtracting 1
% below converts the 1/2 subscripts into 0/1 values)
[x1,x2,x3] = ind2sub([2,2,2], find(zeros(2,2,2)==0));
strats = [x1 x2 x3] - 1;
% we now combine the above with the 11 possible deliberation thresholds,
% running from 0 to d in increments of d/10
Tvals = 0:d/10:d;
strats = repmat(strats,length(Tvals),1);
Tvals = sort(repmat(Tvals,1,length(strats)/length(Tvals)));
strats = [strats Tvals'];
% ... to create a total of 2*2*2*11 (88) possible strategies
M = size(strats,1); % define M to be number of strats
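% (Optional sanity check: with T discretized into 11 values, we expect
%  2*2*2*11 = 88 strategies in total)
assert(M == 88, 'Unexpected number of strategies')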
%%% Main Steady State Loop %%%
% initialize matrix to store steady state distributions for each p value
ssDists = zeros(M,length(ps_steadyState));
% initialize matrix to store average strat values for each p value
avgStrats = zeros(length(ps_steadyState),4);
for p=ps_steadyState
p % display p value that's running
% initialize payoff table
payoffTable=zeros(M);
% create payoff table
for i=1:M
for j=1:M
payoffTable(i,j)=calculate_payoff(strats(i,:),strats(j,:),b,c,d,p);
end
end
% run steady state using this payoff table and store result in ssDists
ssDists(:,p==ps_steadyState)=steady_state_calc(payoffTable,N,w,a);
% use ssDists to get average strategy values by taking a weighted average of
% the strat values based on the distribution in ssDists
avgStrats(p==ps_steadyState,:) = sum(repmat(ssDists(:,p==ps_steadyState),1,4)...
.*strats);
end
%%% Plot Results %%%
figure
plot(ps_steadyState,avgStrats); axis([0 1 0 1])
legend('sR','s1','sI','T');
xlabel('Probability of Repeated Game p')
ylabel('Average Value')
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% AGENT-BASED SIMULATION %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Main Loop %%%
% initialize matrix to store average strat values for each p value
avgStrats = zeros(length(ps_agentSim),4);
for p=ps_agentSim
p % display p value that's running
% run agent sim for select p value
avgStrats(p==ps_agentSim,:) = ...
agent_based_sim(N,b,c,d,p,w,mu,nGens,a);
end
%%% Plot Results %%%
figure(2)
plot(ps_agentSim,avgStrats,'--o'); axis([0 1 0 1])
legend('sR','s1','sI','T');
xlabel('Probability of Repeated Game p')
ylabel('Average Value')
function [avgStrat,params]=agent_based_sim(N,b,c,d,p,w,mu,nGens,a,popInit)
% Agent based evolutionary dynamics simulations using the Moran process with
% exponential fitness
%
% INPUTS
% N = population size
% w = selection strength
% mu = mutation rate
% nGens = number of generations per simulation
% popInit = Nx4 matrix giving the initial population (if this argument
% is left off, random initial conditions are generated)
% b,c,d,p are the game parameters used in the payoff function
% a = assortment (probability of interacting with a player having the same
% strategy as you, rather than a randomly selected player)
%
% OUTPUTS
% avgStrat = 1x4 vector with the value of each of the 4 strategy parameters
% [sR,s1,sI,T], averaged over the population and across all generations
% params = vector of the parameters used (optional)
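%
% EXAMPLE USAGE (illustrative only; these parameter values are arbitrary
% and small so the call finishes quickly, and are not the values used in
% the paper):
%   avgStrat = agent_based_sim(20, 4, 1, 1, 0.5, 6, 0.05, 1e4, 0);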
%%%%%%%
% Initialize population - Nx4 matrix, where
% strat consists of [sR,s1,sI,T],
% with sR,s1,sI ranging from 0 to 1 and T ranging from 0 to d
% (see calculate_payoff.m for details)
% If fewer than 10 inputs were passed to the fn (i.e., popInit was left
% off), start from a random initial population
if nargin<10
pop=rand(N,4); % pick random values in [0,1] for each param
pop(:,4)=pop(:,4)*d; % multiply T by d so it's in [0,d]
params=[N,b,c,d,p,w,mu,nGens,a];
% Otherwise, use the initial condition you were passed
else
pop=popInit;
params=[N,b,c,d,p,w,mu,nGens,a]; % (popInit is a matrix, so it is not appended to this row vector)
if N~=length(pop(:,1)) % just in case N and the size of popInit are different, let the user know
% and then use the N implied by the initial condition
disp('N and the size of the initial pop vector do not match!')
pause
N=length(pop(:,1));
end
end
% Initialize the matrix that will store the payoffs that result from
% each pair of agents interacting
pairwisePayoffs=zeros(N);
for i=1:N
for j=i:N
pairwisePayoffs(i,j)=calculate_payoff(pop(i,:),pop(j,:),b,c,d,p);
pairwisePayoffs(j,i)=calculate_payoff(pop(j,:),pop(i,:),b,c,d,p);
end
end
% (The diagonal holds each agent's payoff against its own strategy,
% which is used for the assortment term in the fitness calculation below)
% Initialize the average strategy vector for this simulation run
avgStrat=zeros(1,4);
%%%%%%
% Main simulation loop
for gen=1:nGens
% Display every 100,000th generation in command window
if mod(gen,100000)==0
gen
end
% To make the code run faster, we only want to update
% pairwisePayoffs in a given generation if the population make-up
% changed. So this variable will keep track of whether, in the
% current generation, a change occurred. We initialize it as 0 (i.e.,
% change did not occur) and then may change it to 1 below if a
% change is made to pop
changeOccurred=0;
% Did a mutation happen?
if rand<mu
% Mutation occurs:
% Pick a learner
learner=randperm(N,1);
% Pick a random strategy for her
pop(learner,:)=rand(1,4); % pick random values in [0,1] for each param
pop(learner,4)=pop(learner,4)*d; % multiply T by d so it's in [0,d]
% Indicate that a change occurred to the population
changeOccurred=1;
else
% No mutation:
% Pick a random learner
learner=randi(N);
% Pick someone to imitate, proportional to fitness
% (which is exp(w*(a * payoff against own strategy + (1-a)*(avg payoff against all other strategies))))
teacher=pickOneProportionally(...
exp(w*(...
a*diag(pairwisePayoffs)... % a * (diagonal of pairwisePayoffs)
+(1-a)*sum(pairwisePayoffs.*~eye(N),2)/(N-1)))); %(1-a) * (row-sum of all non-diagonal elements of pairwisePayoff, divided by (N-1) to make it an average)
% Record whether any strategy parameter will actually change
% (any() returns a scalar flag; if even one parameter differs,
% the payoff table must be refreshed below)
changeOccurred=any(pop(learner,:)~=pop(teacher,:));
% Update learner's strategy
pop(learner,:)=pop(teacher,:);
end
% Now that we are done with potential mutation/learning, we want to
% update pairwisePayoffs, but only if pop may have changed (to save
% computation time)
if changeOccurred==1
% go thru each agent, and update the payoff that agent gets
% against learner, as well as payoff learner gets against that
% agent
for i=1:N
pairwisePayoffs(i,learner)=calculate_payoff(pop(i,:),pop(learner,:),b,c,d,p);
pairwisePayoffs(learner,i)=calculate_payoff(pop(learner,:),pop(i,:),b,c,d,p);
end
end
% add this generation to avgStrat
avgStrat=avgStrat+mean(pop);
end
% Now we are done with the main simulation loop - this simulation run
% is over.
% convert avgStrat from a sum of per-generation averages into an overall average
avgStrat=avgStrat/nGens;
end
function [index,value]=pickOneProportionally(input)
% 'input' should be an Nx1 vector of values.
% this fn picks an entry with probability proportional to its value, and
% returns that entry's index and value.
% (NaN values should never occur in 'input', so no special handling is done here)
% first, check if input is all 0s, in which case you pick one at random
if sum(input)==0
index=randi(length(input));
value=0;
else
% otherwise:
% normalize values so they sum to 1
inputN=input/sum(input);
% make a new vector 'scaled' that is the CDF of input
% (i.e. that effectively lines up the values along the number line from 0 to 1)
% for example, say that you had
% input=[4 2 1 3 10]
% then after normalization you'd have input=[.2 .1 .05 .15 .5]
% and the CDF would be [.2 .3 .35 .5 1]
scaled=zeros(length(inputN),1);
scaled(1)=inputN(1);
for i=2:length(inputN)
scaled(i)=scaled(i-1)+inputN(i);
end
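% (Note: an equivalent vectorized form would simply be scaled = cumsum(inputN);
% the explicit loop above does the same thing)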
% now generate a random number between 0 and 1, and see which bin of scaled
% it lands in: that is, find the first entry in scaled which is larger than
% the random number
index=find(scaled>rand,1);
value=input(index);
end
end
function payoff = calculate_payoff(s1,s2,b,c,d,p)
%% SETUP %%
% s1 and s2 are agent 1's and agent 2's respective strategy profiles
% where a strategy profile consists of [pR,p1,pI,T]
% STRATEGY PARAMETERS %
% pR is probability of cooperating when agent deliberates and realizes the
% game is repeated/reciprocal
% p1 is probability of cooperating when agent deliberates and realizes the
% game is 1-shot/social dilemma
% pI is probability of cooperating when agent uses intuition (and therefore
% can't discriminate repeated vs. 1-shot games)
% T is (threshold) max cost in [0,d] agent is willing to pay to deliberate
% ENVIRONMENT/GAME PARAMETERS %
% b is benefit of cooperation
% c is cost of cooperation
% d is max possible cost of deliberation (such that the cost of
% deliberation varies uniformly in the interval [0,d])
% p is probability of repeated game
% OUTPUTS %
% payoff is s1's payoff
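% EXAMPLE (illustrative, not taken from the paper): with b=4, c=1, d=1,
% p=0.5, an always-intuitive cooperator s1 = [0 0 1 0] (T=0, so it never
% deliberates) facing an always-intuitive defector s2 = [0 0 0 0] earns 0
% in the repeated game and -c = -1 in the 1-shot game, so
% calculate_payoff([0 0 1 0],[0 0 0 0],4,1,1,0.5) returns 0.5*0 + 0.5*(-1) = -0.5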
%%%%%%%%%%%%%%%%%%%%%%%
%% CALCULATE PAYOFFS %%
%%%%%%%%%%%%%%%%%%%%%%%
% Rename each agent's threshold to T1 and T2, respectively
T1 = s1(4);
T2 = s2(4);
%%%%
% Calculate repeated game payoffs
% Repeated Game Payoff Matrix
repPayoffMat=[0 0
0 (b-c)];
% Rows correspond to agent 1's strategy
% where row 1 = agent 1 defects, row 2 = agent 1 cooperates
% Columns correspond to agent 2's strategy
% where col 1 = agent 2 defects, col 2 = agent 2 cooperates
% Values correspond to agent 1's payoffs
% e.g., if agent 1 cooperates and agent 2 defects,
% agent 1 gets a payoff of 0 because repPayoffMat(2,1) = 0
% We consider repeated-game payoffs in 4 situations:
% rPayDD: both agents deliberate
% rPayDI: only agent 1 deliberates
% rPayID: only agent 2 deliberates
% rPayII: neither agent deliberates
% E.g., to calculate rPayDI,
% We know agent 1 deliberates and realizes they're in a repeated game =>
% their prob. of cooperating is pR, i.e., s1(1)
% We know agent 2 uses their intuitive response =>
% their prob. of cooperating is pI, i.e., s2(3)
% Thus, rPayDI is a weighted average of the payoff agent 1 gets when both
% cooperate (repPayoffMat(2,2)), when only player 1 cooperates
% (repPayoffMat(2,1)), and so on
% where the prob. both cooperate is given by s1(1)*s2(3), prob. only
% player 1 cooperates is given by s1(1)*(1-s2(3)), and so on
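% (Note that since the only nonzero entry of repPayoffMat is (2,2) = b-c,
% each of these expressions simplifies: e.g., rPayDI reduces to
% s1(1)*s2(3)*(b-c))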
rPayDD=s1(1)*s2(1)*repPayoffMat(2,2) + s1(1)*(1-s2(1))*repPayoffMat(2,1) + ...
(1-s1(1))*s2(1)*repPayoffMat(1,2) + (1-s1(1))*(1-s2(1))*repPayoffMat(1,1);
rPayDI=s1(1)*s2(3)*repPayoffMat(2,2) + s1(1)*(1-s2(3))*repPayoffMat(2,1) + ...
(1-s1(1))*s2(3)*repPayoffMat(1,2) + (1-s1(1))*(1-s2(3))*repPayoffMat(1,1);
rPayID=s1(3)*s2(1)*repPayoffMat(2,2) + s1(3)*(1-s2(1))*repPayoffMat(2,1) + ...
(1-s1(3))*s2(1)*repPayoffMat(1,2) + (1-s1(3))*(1-s2(1))*repPayoffMat(1,1);
rPayII=s1(3)*s2(3)*repPayoffMat(2,2) + s1(3)*(1-s2(3))*repPayoffMat(2,1) + ...
(1-s1(3))*s2(3)*repPayoffMat(1,2) + (1-s1(3))*(1-s2(3))*repPayoffMat(1,1);
%%%
% Calculate 1-shot game payoffs
% (these work the same way as the repeated game payoffs)
% 1-shot Game Payoff Matrix
onesPayoffMat=[0 b
-c (b-c)];
oPayDD=s1(2)*s2(2)*onesPayoffMat(2,2) + s1(2)*(1-s2(2))*onesPayoffMat(2,1) + ...
(1-s1(2))*s2(2)*onesPayoffMat(1,2) + (1-s1(2))*(1-s2(2))*onesPayoffMat(1,1);
oPayDI=s1(2)*s2(3)*onesPayoffMat(2,2) + s1(2)*(1-s2(3))*onesPayoffMat(2,1) + ...
(1-s1(2))*s2(3)*onesPayoffMat(1,2) + (1-s1(2))*(1-s2(3))*onesPayoffMat(1,1);
oPayID=s1(3)*s2(2)*onesPayoffMat(2,2) + s1(3)*(1-s2(2))*onesPayoffMat(2,1) + ...
(1-s1(3))*s2(2)*onesPayoffMat(1,2) + (1-s1(3))*(1-s2(2))*onesPayoffMat(1,1);
oPayII=s1(3)*s2(3)*onesPayoffMat(2,2) + s1(3)*(1-s2(3))*onesPayoffMat(2,1) + ...
(1-s1(3))*s2(3)*onesPayoffMat(1,2) + (1-s1(3))*(1-s2(3))*onesPayoffMat(1,1);
%%%
% Calculate agent 1's final payoff based on
% Prob. of repeated game (p) vs. 1-shot game (1-p)
% Prob. agents 1 and 2 deliberate (T1/d and T2/d) vs. use intuition (1-T1/d and 1-T2/d)
% Average costs agent 1 and 2 pay when deliberating (T1/2 and T2/2)
% (because the cost of deliberation varies uniformly, an agent who's
% willing to pay max cost of deliberation T pays an average cost of
% T/2 to deliberate)
% We take a weighted avg. of agent 1's payoff in 4 situations:
% 1. payoff when both agents deliberate = (p*rPayDD + (1-p)*oPayDD - T1/2)
% this happens with probability (T1/d)*(T2/d)
% 2. payoff when only agent 1 deliberates = (p*rPayDI + (1-p)*oPayDI - T1/2)
% this happens with probability (T1/d)*(1 - T2/d)
% ... and so on
payoff = (T1/d)*(T2/d)*(p*rPayDD + (1-p)*oPayDD - T1/2) + ...
(T1/d)*(1 - T2/d)*(p*rPayDI + (1-p)*oPayDI - T1/2) + ...
(1 - T1/d)*(T2/d)*(p*rPayID + (1-p)*oPayID) + ...
(1 - T1/d)*(1 - T2/d)*(p*rPayII + (1-p)*oPayII);
end
function [ssDist,transitionPs]=steady_state_calc(payoffTable,N,w,a)
% Calculate the steady state distribution of the Moran process with
% exponential fitness, assuming the limit of low mutation
%
% INPUTS
% payoffTable is an MxM matrix of payoffs, where M is the number of strategies
% N is the population size
% w is the selection strength (0=neutral drift, ->Inf=deterministic)
% a is assortment (probability of interacting with a player having the same
% strategy as you, rather than a randomly selected player)
%
% OUTPUTS
% ssDist is an Mx1 vector with the steady state frequency of each strategy
% transitionPs is an MxM matrix with transition probabilities between
% homogeneous states of the population
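%
% EXAMPLE (illustrative, not from the paper): for a 2-strategy Prisoner's
% Dilemma with payoffTable = [3 -1; 4 0] (strategy 1 = cooperate,
% strategy 2 = defect, so payoffTable(i,j) is i's payoff against j with
% b=4, c=1), defection dominates, and
%   steady_state_calc([3 -1; 4 0], 50, 6, 0)
% should place nearly all of the steady state weight on strategy 2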
numStrats=length(payoffTable);
transitionPs=zeros(numStrats);
% To do the steady state calc, we need to assemble a transition matrix
% between homogeneous states of the population
for s1=1:numStrats
for s2=1:numStrats
if s1~=s2
% for each pair of strategies s1 and s2, transitionPs(s1,s2) is the
% probability of moving from a homogeneous s2 population to a
% homogeneous s1 population: the probability that an s1 mutant arises
% (1/(numStrats-1), assuming uniform mutation) times the probability
% that the mutant then goes to fixation
transitionPs(s1,s2)=(1/(numStrats-1))*calc_invasionprob(payoffTable,N,w,s1,s2,a);
end
end
end
% The diagonal entries of the transition matrix are the probability you
% stay in the given state - which is just 1 minus all the transition
% probabilities out of that state
for i=1:numStrats
transitionPs(i,i)=1-sum(transitionPs(:,i));
end
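% (Each column of transitionPs now sums to 1, i.e., the matrix is
% column-stochastic with columns indexing the current state, which is why
% its eigenvector with eigenvalue 1 gives the steady state distribution)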
% Now that you've got the transition matrix, find the eigenvector with
% eigenvalue 1, and that's your steady state distribution!
[vec,val]=eig(transitionPs);
[~, idxofEigWithVal1]=min(abs(diag(val)-1)); % numerically, the leading eigenvalue may not be exactly 1, just very close, so pick the eigenvalue closest to 1
% normalize the eigenvector corresponding to eigenvalue 1 so that it sums to 1, and you're done!
ssDist=vec(:,idxofEigWithVal1)./sum(vec(:,idxofEigWithVal1));
end
function rhoBA=calc_invasionprob(payoffTable,N,w,sA,sB,a)
% here we calculate the probability that a single sA mutant goes to
% fixation in a resident population of sB players (returned as rhoBA)
% this probability is the inverse of (1 + a sum of products), so we
% start by initializing rhoInv to 1, then add on the sum of the series of
% products, and finally take the inverse
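% In symbols, this is the standard Moran-process fixation probability
%   rho = 1 / ( 1 + sum_{k=1}^{N-1} prod_{i=1}^{k} (g_i / f_i) )
% where f_i and g_i (computed below) are the fitnesses of an sA and an sB
% player, respectively, when the population contains i sA players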
rhoInv=1;
% loop over k = 1..N-1; each value of k contributes one term (a product
% over the intermediate population states containing 1 through k sA
% players) to the sum in the fixation-probability formula
for k=1:N-1
% same thing - initialize the eventual product of various terms as 1,
% then multiply it by stuff
thisProd=1;
% loop through each intermediate make-up of the population (indexed by
% i = the number of sA players) that would need to be traversed to get
% from 1 sA player up to k sA players
for i=1:k
% for each population make-up, multiply by the ratio of the fitness
% of an sA player in the current pop to the fitness of an sB player
% in the current pop (where there are i sA players)
% fitness = exp( w * expectedpayoff)
% expectedpayoff = a * payoff against your own strategy
% + (1-a) *( (payoff against sB * fraction of other players who are sB)
% +(payoff against sA * fraction of other players who are sA)
% )
fi=exp(w* (a*payoffTable(sA,sA)+(1-a)*(payoffTable(sA,sB)*(N-i)/(N-1) + payoffTable(sA,sA)*(i-1)/(N-1))));
gi=exp(w* (a*payoffTable(sB,sB)+(1-a)*(payoffTable(sB,sB)*(N-i-1)/(N-1) + payoffTable(sB,sA)*(i)/(N-1) )));
% multiply the current state of the product by the ratio for this value of i
thisProd=thisProd*(gi/fi);
end
% now that we've finished with the product for k, add it onto the sum
rhoInv=rhoInv+thisProd;
end
% now that we've finished with the sum, take the inverse
rhoBA=1/rhoInv;
end