Backward-looking Phillips Curve Simulation
#This script carries out a simulation study similar to that in Section 2 of "Posterior Predictive
#Evidence on US Inflation..." by Basturk et al. (2012), exploring the effect of trend
#mis-specification on estimates of the inflation persistence parameter.
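#Sketch of the data-generating process implemented below (notation is mine, inferred from the code):
#  z.tilde[t]  = phi1 * z.tilde[t-1] + phi2 * z.tilde[t-2] + e2[t]   (transitory real activity, AR(2))
#  pi.tilde[t] = lambda * z.tilde[t] + g * pi.tilde[t-1] + e1[t]     (transitory inflation, BLPC)
#  pi.obs[t]   = pi.tilde[t] + pi.trend[t], where pi.trend drops from break.size to 0 at mid-sample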
#Library to implement Bai and Perron (2003)
library(strucchange)
set.seed(4513)
g <- 0.5 #Inflation persistence
N.sims <- 250 #Simulation repetitions
#This simulation will condition on breaks of different sizes.
breaks <- seq(from = 0.1, to = 1.5, by = 0.1)
#Initialize matrices to store the results
bias <- rel.rmse <- est.rmse <- matrix(NA, ncol = 4, nrow = length(breaks))
colnames(bias) <- colnames(rel.rmse) <- colnames(est.rmse) <- c('demeaned', 'detrended.1', 'detrended.n', 'true.model')
#Function to simulate the transitory components of real activity (an AR(2) process) and inflation.
#I use the same parameter values as the paper except for the correlation between the errors in the
#two equations, which I set to zero so that I can use OLS rather than IV, for simplicity.
tilde.sim <- function(g, N.T = 200, lambda = 0.1, phi1 = 0.1, phi2 = 0.5, var.e1 = 1, var.e2 = 0.01){
  z.tilde <- arima.sim(list(order = c(2, 0, 0), ar = c(phi1, phi2)), n = N.T, sd = sqrt(var.e2))
  e1 <- rnorm(N.T, sd = sqrt(var.e1))
  pi.0 <- 0
  pi.tilde <- rep(NA, N.T)
  pi.tilde[1] <- lambda * z.tilde[1] + g * pi.0 + e1[1]
  for(i in 2:N.T){
    pi.tilde[i] <- lambda * z.tilde[i] + g * pi.tilde[i-1] + e1[i]
  } #END for i
  return(data.frame(z.tilde, pi.tilde))
}#END tilde.sim
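#Quick illustrative check (not part of the original study): draw one short series and inspect it.
#Re-seeding afterwards keeps the RNG stream identical to the original script for the results below.
example.draw <- tilde.sim(g, N.T = 20)
print(head(example.draw))
set.seed(4513)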
#Function to estimate the Backward-Looking Phillips Curve based on simulated data.
BLPC.est <- function(data, break.size){
  N.T <- nrow(data)
  z.tilde <- data$z.tilde
  pi.tilde <- data$pi.tilde
  #Generate the trend: inflation starts at an elevated level and falls at the halfway point of the sample
  pi.trend <- c(rep(break.size, N.T %/% 2), rep(0, N.T - (N.T %/% 2)))
  #No trend for z
  z.trend <- rep(0, N.T)
  z.obs <- z.tilde + z.trend
  pi.obs <- pi.tilde + pi.trend
  #BIC to choose the optimal number of breakpoints
  pi.breaks <- breakpoints(pi.obs ~ 1)
  #Ignore possible breaks and simply demean
  pi.demean <- lm(pi.obs ~ 1)$residuals
  #Impose one breakpoint
  pi.detrend.1 <- lm(pi.obs ~ breakfactor(pi.breaks, breaks = 1))$residuals
  #Use the BIC-selected number of breakpoints. If BIC selects zero breakpoints, then demean
  if(length(levels(breakfactor(pi.breaks))) > 1){
    pi.detrend.n <- lm(pi.obs ~ breakfactor(pi.breaks))$residuals
  } else {
    pi.detrend.n <- pi.demean
  }#END if else
  #Set up design matrix for the estimator based on de-meaning observed inflation
  demeaned <- data.frame(pi.demean, z.tilde, pi.demean.lag = c(0, pi.demean[-N.T]))
  #Estimate the BLPC using de-meaned inflation
  est.demeaned <- lm(pi.demean ~ z.tilde + pi.demean.lag - 1, data = demeaned)$coefficients[2]
  #Set up design matrix for the estimator that detrends by estimating a single break
  detrended.1 <- data.frame(pi.detrend.1, z.tilde, pi.detrend.1.lag = c(0, pi.detrend.1[-N.T]))
  #Estimate the BLPC using inflation data from which a single estimated break has been removed
  est.detrended.1 <- lm(pi.detrend.1 ~ z.tilde + pi.detrend.1.lag - 1, data = detrended.1)$coefficients[2]
  #Set up design matrix for the estimator that detrends using the BIC-selected number of breaks
  detrended.n <- data.frame(pi.detrend.n, z.tilde, pi.detrend.n.lag = c(0, pi.detrend.n[-N.T]))
  #Estimate the BLPC using data de-trended with the BIC-optimal number of breaks
  est.detrended.n <- lm(pi.detrend.n ~ z.tilde + pi.detrend.n.lag - 1, data = detrended.n)$coefficients[2]
  #Set up design matrix for the estimator that uses the true transitory component, i.e. the true trend is removed
  true.model <- data.frame(pi.tilde, z.tilde, pi.tilde.lag = c(0, pi.tilde[-N.T]))
  #Estimate the BLPC using the true transitory component
  est.true <- lm(pi.tilde ~ z.tilde + pi.tilde.lag - 1, data = true.model)$coefficients[2]
  #Collect and return the results
  return(data.frame(demeaned = est.demeaned, detrended.1 = est.detrended.1, detrended.n = est.detrended.n, true.model = est.true, row.names = NULL))
}#END BLPC.est
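#Illustrative single run (not part of the original study): estimate the persistence parameter g
#from one simulated dataset with a trend break of size 0.5, then re-seed so the replications
#below reproduce the original script exactly.
one.draw <- tilde.sim(g)
print(BLPC.est(one.draw, break.size = 0.5))
set.seed(4513)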
#Generate the simulated transitory components. This only needs to run once, since the same
#simulations are reused when adding in breaks of different sizes.
transitory <- replicate(N.sims, tilde.sim(g), simplify = FALSE)
#Convenience function to calculate RMSE
rmse <- function(x, true){
  sqrt(mean((x - true)^2))
}#END rmse
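#Worked example: rmse(c(0.4, 0.6), true = 0.5) = sqrt(mean(c(0.01, 0.01))) = 0.1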
#Vary the break size and reuse the transitory components from above
for(i in seq_along(breaks)){
  estimates <- sapply(transitory, BLPC.est, break.size = breaks[i])
  estimates <- matrix(unlist(estimates), ncol = 4, byrow = TRUE)
  temp.est.rmse <- apply(estimates, 2, rmse, true = g)
  temp.rel.rmse <- temp.est.rmse / temp.est.rmse[4]
  temp.bias <- apply((estimates - g), 2, mean)
  est.rmse[i,] <- temp.est.rmse
  rel.rmse[i,] <- temp.rel.rmse
  bias[i,] <- temp.bias
}#END for i
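#Optional visual check (not in the original script): plot bias against break size for each
#detrending approach; column order matches colnames(bias).
matplot(breaks, bias, type = "l", lty = 1, col = 1:4, xlab = "Break size", ylab = "Bias of persistence estimate")
legend("bottomleft", legend = colnames(bias), col = 1:4, lty = 1)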
#Output results
setwd('~/Dropbox/Discussions/Tripartite_2013')
write.csv(cbind(breaks, est.rmse), 'estimator_rmse.csv', row.names = FALSE)
write.csv(cbind(breaks, bias), 'estimator_bias.csv', row.names = FALSE)
write.csv(cbind(breaks, rel.rmse), 'relative_estimator_rmse.csv', row.names = FALSE)
@PalomaChuan1 commented:

How can I run this with real data? And also, where is the unemployment variable?