@dhguhl, created March 18, 2018
Example of estimating a "mixed" logit model in Stan
library(rstan)
library(mlogit)
# Travel mode choice between Sydney and Melbourne (AER::TravelMode): 210
# individuals choosing among air, train, bus, and car, one row per
# individual-alternative pair (long format).
data("TravelMode", package = "AER")
TM = mlogit.data(TravelMode, choice = "choice", shape = "long",
                 alt.var = "mode")
head(TM)
# individual mode choice wait vcost travel gcost income size
# 1.air 1 air FALSE 69 59 100 70 35 1
# 1.train 1 train FALSE 34 31 372 71 35 1
# 1.bus 1 bus FALSE 35 25 417 70 35 1
# 1.car 1 car TRUE 0 10 180 30 35 1
# 2.air 2 air FALSE 64 58 68 68 30 2
# 2.train 2 train FALSE 44 31 354 84 30 2
# vcost and travel enter with a generic coefficient; income and size are
# individual-specific and get one coefficient per alternative (air = reference).
f = mFormula(choice ~ vcost + travel | income + size)
M = model.matrix(f, TM)
head(M)
# train:(intercept) bus:(intercept) car:(intercept) vcost travel train:income bus:income car:income train:size bus:size car:size
# 1.air 0 0 0 59 100 0 0 0 0 0 0
# 1.train 1 0 0 31 372 35 0 0 1 0 0
# 1.bus 0 1 0 25 417 0 35 0 0 1 0
# 1.car 0 0 1 10 180 0 0 35 0 0 1
# 2.air 0 0 0 58 68 0 0 0 0 0 0
# 2.train 1 0 0 31 354 30 0 0 2 0 0
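# Not part of the original gist: a quick sanity check (sketch) that the design
# matrix stacks the 4 alternatives of every individual in consecutive rows,
# which is exactly the layout the Stan model below relies on.
dim(M)                                   # 840 x 11: 210 individuals x 4 alternatives
nrow(M) / length(unique(TM$individual))  # 4 rows per individual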
# Maximum-likelihood fit with mlogit, used as a reference for the Stan results
m0 = mlogit(f, TM)
summary(m0)
# Estimate Std. Error t-value Pr(>|t|)
# train:(intercept) 2.74929304 0.72191079 3.8084 0.0001399 ***
# bus:(intercept) 2.28584868 0.82572650 2.7683 0.0056352 **
# car:(intercept) 0.19428607 0.75802006 0.2563 0.7977136
# vcost -0.00776260 0.00644501 -1.2044 0.2284216
# travel -0.00357252 0.00074996 -4.7636 1.902e-06 ***
# train:income -0.05732888 0.01240471 -4.6215 3.809e-06 ***
# bus:income -0.03598870 0.01329984 -2.7059 0.0068109 **
# car:income -0.00851538 0.01092006 -0.7798 0.4355135
# train:size 0.41042832 0.23948952 1.7138 0.0865722 .
# bus:size -0.16998677 0.34438757 -0.4936 0.6215947
# car:size 0.70780836 0.21164149 3.3444 0.0008247 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Log-Likelihood: -240.57
# McFadden R^2: 0.15221
# Likelihood ratio test : chisq = 86.38 (p.value = 2.519e-15)
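# Not part of the original gist: a small sketch recomputing the fitted choice
# probabilities by hand, to make explicit how the model matrix enters the
# likelihood that the Stan program below implements. The order of coef(m0)
# matches the columns of M (compare the two outputs above); each individual's
# probabilities are the softmax of M %*% beta over their block of 4 rows.
eta  = as.vector(M %*% coef(m0))
prob = matrix(exp(eta), ncol = 4, byrow = TRUE)  # one row per individual: air, train, bus, car
prob = prob / rowSums(prob)
head(prob)                                       # compare with fitted(m0, outcome = FALSE)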
mnl = "
data {
int<lower=1> N;
int<lower=1> K;
int<lower=2> J;
int<lower=1, upper=J> y[N];
matrix[N*J, K] x;
}
parameters {
vector[K] beta;
}
model {
beta ~ normal(0, 10);
for (n in 1:N) {
y[n] ~ categorical_logit(x[((n-1)*J+1):(n*J),] * beta);
}
}
"
# TM is ordered in blocks of 4 rows per individual (air, train, bus, car), so
# recycling 1:4 labels each row with its alternative index.
TM$alt = 1:4
y = TM[TM$choice, "alt"]  # index of the chosen alternative for each individual
x = M
m1 = stan(model_code = mnl, iter = 3500L, seed = 1, warmup = 1000L, cores = 4L,
          data = list(N = length(y), K = ncol(x), J = max(y), y = y, x = x))
print(m1, digits = 3)
# mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
# beta[1] 2.814 0.013 0.733 1.416 2.310 2.802 3.303 4.244 3160 1.000
# beta[2] 2.375 0.014 0.841 0.782 1.806 2.359 2.935 4.046 3435 1.000
# beta[3] 0.171 0.013 0.759 -1.287 -0.350 0.160 0.681 1.674 3466 1.001
# beta[4] -0.008 0.000 0.007 -0.021 -0.013 -0.008 -0.004 0.005 6354 1.000
# beta[5] -0.004 0.000 0.001 -0.005 -0.004 -0.004 -0.003 -0.002 10000 1.000
# beta[6] -0.059 0.000 0.013 -0.085 -0.067 -0.059 -0.051 -0.035 4452 1.000
# beta[7] -0.037 0.000 0.014 -0.064 -0.046 -0.037 -0.028 -0.011 4898 1.000
# beta[8] -0.009 0.000 0.011 -0.030 -0.016 -0.009 -0.001 0.013 4654 1.000
# beta[9] 0.427 0.004 0.247 -0.048 0.262 0.422 0.589 0.928 4359 1.000
# beta[10] -0.206 0.005 0.353 -0.931 -0.431 -0.187 0.035 0.446 5250 1.000
# beta[11] 0.738 0.003 0.218 0.326 0.587 0.732 0.883 1.179 4690 1.001
# lp__ -246.204 0.038 2.426 -251.813 -247.623 -245.840 -244.444 -242.543 4085 1.001
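# Not part of the original gist: a sketch comparing the maximum-likelihood
# estimates from mlogit with the posterior means from Stan. With the weakly
# informative normal(0, 10) prior the two should be close, as the printed
# summaries above already suggest.
post = summary(m1, pars = "beta")$summary[, "mean"]
round(cbind(mlogit = coef(m0), stan = post), 3)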