Skip to content

Instantly share code, notes, and snippets.

@adambear91
Created December 22, 2025 19:42
Show Gist options
  • Select an option

  • Save adambear91/805197a6f8bf78b5702085aa837c3599 to your computer and use it in GitHub Desktop.

Select an option

Save adambear91/805197a6f8bf78b5702085aa837c3599 to your computer and use it in GitHub Desktop.
Code for blog post on hindsight bias
# Setup -------------------------------------------------------------------
# Fix the RNG seed so the simulation below is exactly reproducible
set.seed(223)
# tidyverse supplies if_else() and map_dbl() used in the simulation
# (and ggplot2, presumably for plots in the accompanying blog post)
library(tidyverse)
# Default all ggplot2 output to the classic theme
theme_set(theme_classic())
# Expected absolute error of a probability estimate x.
# If the event is TRUE with probability x, then
# E|outcome - x| = x * (1 - x) + (1 - x) * x = 2 * x * (1 - x).
# Vectorized over x; maximized (0.5) at x = 0.5, zero at x = 0 or 1.
abs_error <- function(x) {
  x * (1 - x) * 2
}
# Posterior-expected probability estimate given a noisy memory signal m,
# without knowledge of the answer. The latent original estimate has a
# standard-logistic prior in log-odds (i.e. uniform on the probability
# scale) and the memory signal is the latent log-odds plus Gaussian noise
# with sd `noise`. Integration is done numerically on a log-odds grid
# over [-10, 10] with spacing `tol`.
expected_guess_from_memory <- function(m, noise, tol = 0.01) {
  grid <- seq(-10, 10, tol)
  # Unnormalized posterior over the latent log-odds:
  # memory likelihood times logistic prior
  posterior <- dnorm(grid, qlogis(m), noise) * dlogis(grid)
  posterior <- posterior / sum(posterior)
  # Posterior mean on the probability scale
  as.numeric(posterior %*% plogis(grid))
}
# Posterior-expected probability estimate given a noisy memory signal m
# AND knowledge that the answer resolved FALSE (0). Identical to
# expected_guess_from_memory() except the posterior weight also includes
# P(answer = 0 | p) = 1 - p. (As noted in the original: this equals the
# expected error when all statements resolve to FALSE.)
expected_guess_with_answer_zero <- function(m, noise, tol = 0.01) {
  grid <- seq(-10, 10, tol)
  p <- plogis(grid)
  # memory likelihood x logistic prior x likelihood of a FALSE outcome
  w <- dnorm(grid, qlogis(m), noise) * dlogis(grid) * (1 - p)
  as.numeric((w / sum(w)) %*% p)
}
# Expected absolute error of the eventual guess, given only the noisy
# memory signal m (answer unknown). Same posterior grid construction as
# expected_guess_from_memory(), but the posterior expectation is taken of
# the error function 2 * p * (1 - p) (abs_error, inlined here) rather
# than of p itself.
expected_error_from_memory <- function(m, noise, tol = 0.01) {
  grid <- seq(-10, 10, tol)
  w <- dnorm(grid, qlogis(m), noise) * dlogis(grid)
  p <- plogis(grid)
  as.numeric((w / sum(w)) %*% (2 * p * (1 - p)))
}
# Main Simulation ---------------------------------------------------------
# Number of simulated statements
N <- 100000
# Sample binary truth values of statements, with 50% chance of TRUE.
# Named `truth` (not `T`) so the built-in TRUE shorthand is not shadowed.
truth <- sample(c(0, 1), N, replace = TRUE)
# Sample recorded probability estimates from a beta distribution:
# beta(2, 1) when the statement is TRUE, beta(1, 2) when FALSE
X <- rbeta(N, 1 + truth, 2 - truth)
# ... this transforms all estimates to be relative to truth = FALSE
X_mirrored <- if_else(truth == 1, 1 - X, X)
# Std. deviation of Gaussian memory noise in log-odds
noise <- 0.5
# Sample memory signals from X: Gaussian noise added in log-odds space,
# then mapped back to the probability scale
M <- plogis(rnorm(N, qlogis(X), noise))
# ... this transforms remembered estimates to be relative to truth = FALSE
M_mirrored <- if_else(truth == 1, 1 - M, M)
# Compute Bayesian estimates from memory without knowledge of answer
# (map over the values directly rather than over indices)
M_corrected <- map_dbl(M, expected_guess_from_memory, noise = noise)
# Compute Bayesian estimates from memory with knowledge of answer
# (mirrored values, so the known answer is always FALSE)
M_with_answer <- map_dbl(M_mirrored, expected_guess_with_answer_zero, noise = noise)
# Compute expected error from memory without knowledge of answer
M_exp_error <- map_dbl(M, expected_error_from_memory, noise = noise)
# Expectations of error (all approximately equal by law of iterated expectations):
mean(abs_error(X)) # = mean error in initial judgments
mean(M_exp_error) # = expected error from memory without answer
mean(M_with_answer) # = expected error from memory with answer
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment