Created
September 19, 2017 09:40
-
-
Save ByungSunBae/f9a30d00c79057d08c606f763c1b97f2 to your computer and use it in GitHub Desktop.
RNN using Tensorflow in R
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# 2) Build the RNN model ----------------
## Reference: https://github.com/hunkim/DeepLearningZeroToAll/blob/master/lab-12-5-rnn_stock_prediction.py
library(tensorflow)
library(reticulate)
library(data.table)  # data.table(), shift() are used below but were never loaded
library(magrittr)    # %>% is used below; load explicitly rather than relying on re-exports
contrib <- tf$contrib      # TF 1.x contrib namespace (BasicLSTMCell, layers$fully_connected)
tf$reset_default_graph()   # start from a clean graph so re-running the script doesn't accumulate ops
# Training hyper-parameters ----
seq_length    <- 1     # time steps fed to the LSTM per sample
data_dim      <- 1     # features per time step (univariate series)
hidden_dim    <- 10    # LSTM hidden-state size
output_dim    <- 1     # single regression output
learning_rate <- 0.01  # Adam step size
iterations    <- 100   # number of training steps
# Assemble supervised pairs: predict next month's count from the current month ----
## NOTE(review): CountTS / CountPerMonth are created earlier in the analysis (not
## visible in this file); this assumes 141 monthly observations — confirm upstream.
CountVecs <- as.numeric(CountTS)

## X = count at month t, Y = count at month t + 1 (lead by one step);
## row 141 is dropped because its lead target would be NA.
DataForRNN <- data.table(
  Date = CountPerMonth[-141, Date],
  X = CountVecs[-141],
  Y = shift(CountVecs, n = 1, type = "lead")[-141]
)
DataForRNN_matX <- as.matrix(DataForRNN[, X])
DataForRNN_matY <- as.matrix(DataForRNN[, Y])

## Chronological train / test split (no shuffling — this is a time series)
train_inds <- 1:119
test_inds <- 120:140

## Wrap each observation as its own 1x1 matrix so it matches the placeholder
## shape (batch, seq_length = 1, data_dim = 1).
trainX <- DataForRNN_matX[train_inds, 1] %>%
  as.matrix() %>%
  lapply(function(x) matrix(x, nrow = 1, ncol = 1))
testX <- DataForRNN_matX[test_inds, 1] %>%
  as.matrix() %>%
  lapply(function(x) matrix(x, nrow = 1, ncol = 1))
trainY <- DataForRNN_matY[train_inds, 1] %>% as.matrix()
testY <- DataForRNN_matY[test_inds, 1] %>% as.matrix()
## Graph inputs ----
# X: (batch, seq_length, data_dim) input sequences; Y: (batch, 1) targets
X <- tf$placeholder(tf$float32, shape = shape(NULL, seq_length, data_dim))
Y <- tf$placeholder(tf$float32, shape = shape(NULL, 1))

## LSTM network ----
cell <- contrib$rnn$BasicLSTMCell(
  num_units = hidden_dim, state_is_tuple = TRUE, activation = tf$tanh
)
OutandState <- tf$nn$dynamic_rnn(cell, X, dtype = tf$float32)
outputs <- OutandState[[1]]  # per-step outputs, shape (batch, seq_length, hidden_dim)
states_ <- OutandState[[2]]  # final LSTM state (not used below)
# NOTE(review): `outputs[[1]]` indexes the FIRST slice, while the stated intent
# ("use the last cell's output", as in the Python original) is the LAST step.
# With seq_length == 1 first and last coincide, so this is harmless here —
# revisit before increasing seq_length.
Y_pred <- contrib$layers$fully_connected(
  outputs[[1]], as.integer(output_dim), activation_fn = NULL
)

## Loss and optimizer ----
loss <- tf$reduce_sum(tf$square(Y_pred - Y))  # sum of squared errors
optimizer <- tf$train$AdamOptimizer(learning_rate)
train <- optimizer$minimize(loss)

## RMSE metric, computed on plain arrays fed in at evaluation time ----
targets <- tf$placeholder(tf$float32, shape(NULL, 1))
predictions <- tf$placeholder(tf$float32, shape(NULL, 1))
rmse <- tf$sqrt(tf$reduce_mean(tf$square(targets - predictions)))
with(tf$Session() %as% sess, {
  sess$run(tf$global_variables_initializer())

  # Training loop ----
  for (i in seq_len(iterations)) {
    step_tmp <- sess$run(c(train, loss), feed_dict = dict(X = trainX, Y = trainY))
    step_loss <- step_tmp[[2]]
    # cat() prints directly and returns NULL invisibly; the original
    # print(cat(...)) emitted a spurious "NULL" line after every message.
    cat("[step: ", i, "] loss: ", step_loss, "\n")
  }

  # Test step ----
  test_predict <- sess$run(Y_pred, feed_dict = dict(X = testX))
  # NOTE(review): `test_predict[[1]]` extracts only the FIRST predicted value,
  # while `targets` receives the full testY matrix. It likely should feed the
  # whole `test_predict` matrix (as in the Python original) — kept as-is
  # pending confirmation of the intended behavior.
  rmse_val <- sess$run(rmse, feed_dict = dict(
    targets = testY, predictions = test_predict[[1]]
  ))
  cat("RMSE: ", rmse_val, "\n")
})
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.