@edoffagne
Created November 22, 2017 07:36
Train a convolutional network for the SVHN dataset
# This script trains a convolutional network for the SVHN dataset
# with the following architecture:
#
# 1. a convolutional layer with 32 filters of size 5 × 5, max-pooling over
# 3 × 3-pixel regions and ReLU activations
# 2. a convolutional layer with 32 filters of size 5 × 5, ReLU activations
# and average-pooling over 3 × 3-pixel regions
# 3. a convolutional layer with 64 filters of size 5 × 5, ReLU activations
# and max-pooling over 3 × 3-pixel regions
# 4. a fully-connected layer with softmax units
#
# After 25 epochs, it reaches a test accuracy of around 0.88.
require(R.matlab)
require(keras)
require(reticulate)
# Read data downloaded
# from http://ufldl.stanford.edu/housenumbers/
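# Optional sketch: fetch the .mat files if they are not already on disk.
# The file names below are assumed to match those published on the SVHN page.
if (!file.exists("train_32x32.mat")) {
  download.file("http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
                destfile = "train_32x32.mat", mode = "wb")
}
if (!file.exists("test_32x32.mat")) {
  download.file("http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
                destfile = "test_32x32.mat", mode = "wb")
}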
data_train <- readMat("train_32x32.mat")
data_test <- readMat("test_32x32.mat")
batch_size <- 100
num_classes <- 10
epochs <- 25
# Input image dimensions
img_rows <- 32
img_cols <- 32
# The data
x_train <- data_train$X
y_train <- data_train$y
x_test <- data_test$X
y_test <- data_test$y
# Free the raw MAT structures; the arrays needed below have been extracted
rm(data_train, data_test); gc()
# Need to put the data in the right format
d <- dim(x_train)                               # (32, 32, 3, n_train)
train <- array(dim = c(d[4], d[1], d[2], d[3])) # (n_train, 32, 32, 3)
for (i in 1:d[4]) {
  train[i, , , ] <- x_train[, , , i]
}
d <- dim(x_test)
test <- array(dim = c(d[4], d[1], d[2], d[3]))
for (i in 1:d[4]) {
  test[i, , , ] <- x_test[, , , i]
}
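# Equivalent, more concise alternative (base R): the loops above just move the
# sample index from the last to the first dimension, which aperm() does in one
# call. Shown commented out as a sketch; either form produces the same arrays.
# train <- aperm(x_train, c(4, 1, 2, 3))
# test  <- aperm(x_test,  c(4, 1, 2, 3))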
# Redefine dimension of train/test inputs
x_train <- array_reshape(train, c(dim(x_train)[4], img_rows, img_cols, 3))
x_test <- array_reshape(test, c(dim(x_test)[4], img_rows, img_cols, 3))
input_shape <- c(img_rows, img_cols, 3)
cat('x_train_shape:', dim(x_train), '\n')
cat(nrow(x_train), 'train samples\n')
cat(nrow(x_test), 'test samples\n')
# Convert class vectors to binary class matrices
y_train <- to_categorical(y_train-1, num_classes)
y_test <- to_categorical(y_test-1, num_classes)
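# SVHN encodes the digit 0 with the raw label 10, so the y - 1 shift above maps
# raw labels 1..10 onto class indices 0..9 (digit 0 ends up at index 9).
# Quick sanity check: per-class counts recovered from the one-hot matrix.
cat('Training samples per class:', colSums(y_train), '\n')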
# Define Model
model <- keras_model_sequential()
model %>%
  layer_conv_2d(filters = 32, kernel_size = c(5, 5), activation = 'relu',
                input_shape = input_shape, padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(filters = 32, kernel_size = c(5, 5), activation = 'relu',
                padding = "same") %>%
  layer_average_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(filters = 64, kernel_size = c(5, 5), activation = 'relu',
                padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_flatten() %>%
  layer_dense(units = num_classes, activation = 'softmax')
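# Print the layer-by-layer architecture and parameter counts, as a check that
# the network matches the description at the top of the script.
summary(model)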
# Compile model
model %>% compile(
  loss = loss_categorical_crossentropy,
  optimizer = optimizer_adadelta(),
  metrics = c('accuracy')
)
# Train & Evaluate
model %>% fit(
  x_train, y_train,
  batch_size = batch_size,
  epochs = epochs,
  verbose = 1,
  validation_data = list(x_test, y_test)
)
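# Optional: fit() returns a Keras history object. Assigning it, e.g.
#   history <- model %>% fit(x_train, y_train, batch_size = batch_size,
#                            epochs = epochs,
#                            validation_data = list(x_test, y_test))
# lets you inspect or plot the per-epoch loss and accuracy with plot(history).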
scores <- model %>% evaluate(
  x_test, y_test, verbose = 0
)
# Output metrics
cat('Test loss:', scores[[1]], '\n')
cat('Test accuracy:', scores[[2]], '\n')
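# Possible follow-up (sketch): persist the trained model and reload it later
# for prediction. The file name "svhn_cnn.h5" is arbitrary.
save_model_hdf5(model, "svhn_cnn.h5")
# model <- load_model_hdf5("svhn_cnn.h5")
# predictions <- model %>% predict(x_test)        # per-class probabilities
# predicted_classes <- max.col(predictions) - 1   # class indices 0..9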