Skip to content

Instantly share code, notes, and snippets.

@HackerEarthBlog
HackerEarthBlog / DL3_H2o.R
Last active January 31, 2017 07:29
Deep Learning
#fit a single-hidden-layer deep learning model on the H2O frames
#(the closing parenthesis was lost in the scrape; restored here)
deepmodel <- h2o.deeplearning(x = x                       #predictor column names/indices
                              ,y = y                      #response column
                              ,training_frame = trainh2o
                              ,validation_frame = testh2o
                              ,standardize = TRUE         #scale inputs; TRUE, not the reassignable T
                              ,model_id = "deep_model"
                              ,activation = "Rectifier"
                              ,epochs = 100
                              ,seed = 1                   #reproducibility (exact only single-threaded)
                              ,hidden = 5)                #one hidden layer with 5 neurons
# Working directory for the exercise.
# NOTE(review): setwd() inside a script hurts portability — prefer
# project-relative paths; kept as-is since this is a tutorial snippet.
path = "~/mydata/deeplearning"
setwd(path)
#load libraries
library(data.table)
library(mlr)
#set variable names
# NOTE(review): this vector is truncated in the scrape — the adult income
# data set this tutorial uses has more columns ("education", ...,
# "target"); confirm against the original gist before running.
setcol <- c("age",
"workclass",
"fnlwgt",
#load the package
library(h2o)  #library() errors on failure; require() only returns FALSE
#start h2o: nthreads = -1 uses all available cores
localH2o <- h2o.init(nthreads = -1, max_mem_size = "20G")
#load data on H2o (train/test are expected to exist as data frames)
trainh2o <- as.h2o(train)
testh2o <- as.h2o(test)
#set parameter space for grid search
activation_opt <- c("Rectifier", "RectifierWithDropout", "Maxout", "MaxoutWithDropout")
hidden_opt <- list(c(10, 10), c(20, 15), c(50, 50, 50))  #candidate layer layouts
l1_opt <- c(0, 1e-3, 1e-5)  #L1 regularization strengths
l2_opt <- c(0, 1e-3, 1e-5)  #L2 regularization strengths
hyper_params <- list(activation = activation_opt,
                     hidden = hidden_opt,
                     l1 = l1_opt,
                     l2 = l2_opt)
# # Installation - Windows
# mxnet is not on CRAN; install the 'drat' helper, register the dmlc
# drat repository, then install mxnet from it.
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")  # NOTE(review): ':::' reaches a non-exported function; works but may break across drat versions
install.packages("mxnet")
library(mxnet)
#Installation - Linux
#Press Ctrl + Alt + T and run the following command
# NOTE: these are shell commands, not R — pasted raw they are a parse
# error. Run them in a terminal (or via system()), not in the R console:
#   sudo apt-get update
#   sudo apt-get -y install git
#load package
library(mxnet)  #library() errors on failure; require() only returns FALSE
#convert target variables to numeric 0-based labels (mxnet expects classes starting at 0)
train[,target := as.numeric(target)-1]
test[,target := as.numeric(target)-1]
#convert train data to matrix, dropping the target column
#(with = FALSE makes data.table treat -c("target") as column selection)
train.x <- data.matrix(train[, -c("target"), with = FALSE])
train.y <- train$target
#set seed to reproduce results
mx.set.seed(1)
#fit a feed-forward multilayer perceptron
#(closing parenthesis restored — it was lost in the scrape; the original
# comment also claimed "10 nodes" while hidden_node is 3)
mlpmodel <- mx.mlp(data = train.x
                   ,label = train.y
                   ,hidden_node = 3            #one hidden layer with 3 nodes
                   ,out_node = 2               #two output classes
                   ,out_activation = "softmax" #softmax returns class probabilities
                   ,num.round = 100            #number of passes over training data
                   ,array.batch.size = 20)     #weights updated after every 20-row batch
#create NN structure: input -> fully-connected(3) -> softmax
input       <- mx.symbol.Variable("data")
fc_layer    <- mx.symbol.FullyConnected(input, num_hidden = 3)  #single layer of 3 neurons
softmax_out <- mx.symbol.SoftmaxOutput(fc_layer)
#train the symbolic network on the prepared matrix/labels
nnmodel <- mx.model.FeedForward.create(symbol = softmax_out,
                                       X = train.x,
                                       y = train.y,
                                       ctx = mx.cpu(),              #train on CPU
                                       num.round = 100,             #training iterations
                                       eval.metric = mx.metric.accuracy,
                                       array.batch.size = 50,       #mini-batch size
                                       learning.rate = 0.01)
#configure another network: input -> FC(10) -> ReLU -> FC(2) -> softmax
data <- mx.symbol.Variable("data")
fc1 <- mx.symbol.FullyConnected(data, name = "fc1", num_hidden=10) #hidden layer with 10 neurons
act1 <- mx.symbol.Activation(fc1, name = "sig", act_type="relu") # NOTE(review): layer is named "sig" but applies ReLU — consider renaming to "relu"
fc2 <- mx.symbol.FullyConnected(act1, name = "fc2", num_hidden=2) #output layer (2 classes) — not a second hidden layer
out <- mx.symbol.SoftmaxOutput(fc2, name = "soft")