Sparse autoencoder version of Twomey & Westermann (2017), Dev Sci
# very quick re-implementation of a loose version of Experiment 1 from
# Twomey & Westermann 2017
# Mike Frank (mcfrank@stanford.edu)
library(autoencoder)
library(tidyverse)
# mean Euclidean distance between consecutive items in a training order
# (the sub-diagonal of the distance matrix holds distances between adjacent rows)
euclidean_pairwise <- function (x) {
  dx <- as.matrix(dist(x))
  md <- mean(dx[row(dx) == col(dx) + 1])
  return(md)
}
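# quick sanity check (not in the original gist): two rows at Euclidean
# distance 5 should give a mean consecutive distance of 5
euclidean_pairwise(matrix(c(0, 0, 3, 4), nrow = 2, ncol = 2, byrow = TRUE))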
# note that T&W don't give the actual orders they used, so I did the "curiosity"
# order and a minimum distance order.
curiosity_training <- matrix(c(1, 5, 1, 5,
                               5, 1, 5, 1,
                               5, 5, 1, 1,
                               1, 1, 5, 5,
                               2, 4, 2, 4,
                               2, 2, 4, 4,
                               4, 4, 2, 2,
                               4, 2, 4, 2),
                             nrow = 8, ncol = 4, byrow = TRUE)
euclidean_pairwise(curiosity_training)
# this may not be the min, but it's the best I could do quickly.
mindist_training <- matrix(c(5, 5, 1, 1,
                             4, 4, 2, 2,
                             4, 2, 4, 2,
                             5, 1, 5, 1,
                             2, 4, 2, 4,
                             1, 5, 1, 5,
                             2, 2, 4, 4,
                             1, 1, 5, 5),
                           nrow = 8, ncol = 4, byrow = TRUE)
euclidean_pairwise(mindist_training)
# they do 20 sweeps; we do two, which shouldn't matter much.
curiosity_training <- rbind(curiosity_training, curiosity_training)
mindist_training <- rbind(mindist_training, mindist_training)
test_peripheral <- matrix(c(1, 1, 1, 1,
                            5, 5, 5, 5),
                          nrow = 2, ncol = 4, byrow = TRUE)
test_central <- matrix(c(3, 3, 3, 3),
                       nrow = 1, ncol = 4, byrow = TRUE)
# the key step is the autoencode() function from the autoencoder package.
# this is a "sparse autoencoder," which is slightly different from the vanilla one.
# I tried to remove the sparsity constraints, but this formulation uses the rho
# parameter, the desired average activation of the hidden units, and I didn't
# know what value to use (in practice the result doesn't depend on it, though).
N.hidden <- 3
lambda <- 0
beta <- 0
rho <- .1
epsilon <- .01
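# optional sanity check (not in the original gist): with beta = 0 the sparsity
# penalty is switched off, so the assumed rho value shouldn't matter. Retraining
# over a few rho values from the same random start makes that easy to verify.
for (r in c(0.01, 0.1, 0.5)) {
  set.seed(1)  # same initial weights each run (assumes autoencode uses R's RNG)
  fit <- autoencode(curiosity_training,
                    N.hidden = N.hidden, unit.type = "logistic",
                    lambda = lambda, beta = beta, rho = r, epsilon = epsilon,
                    rescale.flag = TRUE)
  print(predict(fit, test_peripheral, hidden.output = FALSE)$mean.error)
}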
# train autoencoders
curiosity <- autoencode(curiosity_training,
                        N.hidden = N.hidden, unit.type = "logistic",
                        lambda = lambda, beta = beta, rho = rho, epsilon = epsilon,
                        rescale.flag = TRUE)
mindist <- autoencode(mindist_training,
                      N.hidden = N.hidden, unit.type = "logistic",
                      lambda = lambda, beta = beta, rho = rho, epsilon = epsilon,
                      rescale.flag = TRUE)
# test autoencoders
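# predict() with hidden.output = FALSE returns the reconstructed inputs;
# $mean.error is the mean reconstruction error over the test rows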
error <- tribble(~training, ~test, ~error,
                 "curiosity", "central", predict(curiosity, test_central, hidden.output = FALSE)$mean.error,
                 "curiosity", "peripheral", predict(curiosity, test_peripheral, hidden.output = FALSE)$mean.error,
                 "mindist", "central", predict(mindist, test_central, hidden.output = FALSE)$mean.error,
                 "mindist", "peripheral", predict(mindist, test_peripheral, hidden.output = FALSE)$mean.error)
# output the proportion of error for the peripheral stimuli
# (mean.error already averages across the two peripheral items)
error %>%
  group_by(training) %>%
  summarise(proportion_peripheral_error = error[test == "peripheral"] /
              (error[test == "peripheral"] + error[test == "central"])) %>%
  knitr::kable()