Temporal discounting analysis code.
# Contains basic temporal discounting analysis functions.
# Does not implement multilevel analysis (will be implemented later)
# Typical workflow:
# Input: TD Trials in trialData with the following columns:
# later_value - amount of the delayed "later" option
# delay - delay of the "later" option
# later_choice - 1 if subject chose "later" option, 0 if not
# condition - factor indicating condition
# sub - subject number (factor)
# 1. Get Discounted Value at each delay for each subject
# conditionDVs <- ddply(trialData, .(sub, condition), analyzeSubDD, low=5, high=24)
# low and high are the lowest and highest values shown to the subject; low is also the "now" value (assumed fixed)
# 2. Next, calculate k for each condition for each subject
# conditionKs <- ddply(conditionDVs,.(sub, condition),getHyperbolicK)
# 3. Can also calculate area under the curve (AUC)
# conditionAUCs <- ddply(conditionDVs,.(sub,condition),auc)
# 4. See if subjects were bad discounters using a rule from Kirby et al
# badDiscounters <- ddply(conditionDVs,.(sub,condition),badDiscounters)
# Can exclude subjects if they get a non-zero value
#
# Alejandro de la Vega
# July 5, 2013
library(plyr)
library(zoo)
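# Illustrative input (not part of the original analysis): a made-up trialData
# frame with the columns the workflow above expects. Subject numbers, amounts,
# and delays here are assumptions chosen purely for demonstration.
set.seed(1)
example_trials <- expand.grid(sub = factor(1:2), condition = factor(c("A", "B")),
                              delay = c(7, 14, 28), later_value = seq(12, 58, by = 2))
# choices get more likely the larger the later amount and the shorter the delay
example_trials$later_choice <- rbinom(nrow(example_trials), 1,
                                      plogis(0.4 * (example_trials$later_value -
                                                      (10 + 0.5 * example_trials$delay))))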
# AUC calculation: trapezoidal area under the DV-by-delay curve
auc <- function(df) {
  # sort by delay, then sum trapezoid areas (rollmean gives midpoints of adjacent DVs)
  id <- order(df$delay)
  a <- sum(diff(df$delay[id]) * rollmean(df$DV[id], 2))
  a1 <- as.data.frame(a)
  colnames(a1)[1] <- "auc"
  return(a1)
}
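# Illustrative usage of auc() with assumed values (not from the original data):
example_auc_input <- data.frame(delay = c(0, 7, 14, 28), DV = c(1, 0.8, 0.6, 0.4))
auc(example_auc_input)  # one-row data frame with the trapezoidal area in "auc"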
# Uses nls to estimate k with the hyperbolic discounting function DV = 1/(1 + k*delay)
# Input: data frame including paired DV & delay columns
# Returns a one-row data frame holding the fitted k
getHyperbolicK <- function(DVs) {
  res <- nls(DV ~ 1/(1 + k*delay), start = list(k = .03), data = DVs)
  k1 <- as.numeric(coef(res)[1])
  k1 <- as.data.frame(k1)
  colnames(k1)[1] <- "k"
  return(k1)
}
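# Illustrative usage of getHyperbolicK() with assumed values that roughly follow
# 1/(1 + 0.05*delay); the fitted k should come out near 0.05:
example_k_input <- data.frame(delay = c(0, 7, 14, 28, 56),
                              DV    = c(1, 0.74, 0.59, 0.42, 0.26))
getHyperbolicK(example_k_input)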
# Basic DD logistic regression choice analysis
# Input: Long format DD data for one sub [delay,later_value,choiceRT,later_choice]
# Output: Indifference values and DVs for each delay [delay,indiff,p_intercept,p_lv,DV]
analyzeSubDD <- function(DD, low=10, high=60) {
  # Analyze DD to get an indifference point for each delay
  indiffs <- NULL
  DD$delay <- factor(DD$delay)
  # for each delay calculate indifference points
  for (d in levels(DD$delay)) {
    currdelay <- subset(DD, delay == d)
    # if subject said later to all, assume indifference point is just above low (low + .5)
    if (mean(currdelay$later_choice) == 1) {
      indiff = low + .5
      p_intercept = 1
      p_lv = 1
    }
    # if subject said now to all, assume indifference point is just above high (high + .5)
    else if (mean(currdelay$later_choice) == 0) {
      indiff = high + .5
      # no p-values here; 1 flags that they were not estimated
      p_intercept = 1
      p_lv = 1
    }
    # if they don't have all 1s or 0s
    else {
      F <- factor(currdelay$later_choice)
      # fit logistic function
      fit <- glm(F ~ later_value, data = currdelay, family = binomial())
      indiff <- -coef(fit)[1]/coef(fit)[2]
      # if indifference point doesn't make sense then set to assumed value
      if (indiff < low - .5) {indiff = low + .5}
      else if (indiff > high - .5) {indiff = high + .5}
      # record p-values either way
      p_intercept = round(summary(fit)$coef[, "Pr(>|z|)"][1], 3)
      p_lv = round(summary(fit)$coef[, "Pr(>|z|)"][2], 3)
    }
    indiffs <- rbind(indiffs, c(d, indiff, p_intercept, p_lv))
  }
  # add the undiscounted point at delay 0 (indifference = the now value, so DV = 1)
  indiffs <- rbind(indiffs, c(0, low, NA, NA))
  colnames(indiffs) <- c("delay", "indiff", "p_intercept", "p_lv")
  indiffs <- as.data.frame(indiffs, stringsAsFactors = FALSE)
  indiffs <- transform(indiffs,
                       delay = as.numeric(as.character(delay)),
                       indiff = as.numeric(as.character(indiff)))
  indiffs$DV <- round(low/indiffs$indiff, 4)
  return(indiffs)
}
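# Illustrative usage of analyzeSubDD() on one simulated subject (assumed data,
# not from the original study): choices get more likely with larger later_value,
# so the recovered indifference points should grow with delay.
set.seed(2)
one_sub <- expand.grid(delay = c(7, 14, 28), later_value = seq(12, 58, by = 2))
one_sub$later_choice <- rbinom(nrow(one_sub), 1,
                               plogis(0.4 * (one_sub$later_value - (10 + one_sub$delay))))
analyzeSubDD(one_sub, low = 10, high = 60)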
# Loads all CSV files found in INPUT_PATH and row-binds them into one data frame
loadAllCSVs <- function(INPUT_PATH) {
  # Get list of files
  file_list <- list.files(INPUT_PATH)
  dataset <- NULL
  # Iterate through files and append each one to "dataset"
  for (file in file_list) {
    file <- paste(INPUT_PATH, file, sep = "")
    temp_dataset <- read.table(file, header = TRUE, sep = ",")
    # rbind on NULL handles the first file, so no file is read or appended twice
    dataset <- rbind(dataset, temp_dataset)
    rm(temp_dataset)
  }
  return(dataset)
}
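# Illustrative usage of loadAllCSVs() with a hypothetical folder (not run here);
# note INPUT_PATH needs a trailing slash because paths are built with paste(..., sep=""):
# raw_trials <- loadAllCSVs("data/td_subjects/")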
# Counts violations of a rule from Kirby et al: delays where the mean indifference
# point drops by more than 7 relative to the previous (shorter) delay.
# A non-zero count flags the subject as a bad discounter.
badDiscounters <- function(df) {
  # collapse across conditions: mean DV and mean indifference point per delay
  df$delay <- factor(df$delay)
  df <- ddply(df, .(delay), function(df) c(mean(df$DV), mean(df$indiff)))
  g <- 0
  v = 2
  for (s in 1:nrow(df)) {
    # column 3 holds the mean indifference point for the current delay
    if ((v - df[s, 3]) > 7) {g = g + 1}
    v = df[s, 3]
  }
  g <- as.data.frame(g)
  colnames(g)[1] <- "badDiscounter"
  return(g)
}
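# Illustrative end-to-end run of the workflow described at the top of this file,
# using the made-up example_trials frame defined after the library() calls.
# These are assumed data, so the numbers themselves are meaningless.
example_DVs  <- ddply(example_trials, .(sub, condition), analyzeSubDD, low = 10, high = 60)
example_Ks   <- ddply(example_DVs, .(sub, condition), getHyperbolicK)
example_AUCs <- ddply(example_DVs, .(sub, condition), auc)
example_bad  <- ddply(example_DVs, .(sub, condition), badDiscounters)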
adelavega commented Jul 2, 2021

Great. I used this code in this publication (Experiment 2): https://www.frontiersin.org/articles/10.3389/fpsyg.2013.00355/full

Feel free to cite that or this github page, up to you.

@RacLabYa

Hi @adelavega. Thanks for the great code. Could you share the exact citation of the Kirby et al. paper used to "see if subjects were bad discounters using a rule from Kirby et al"?
