@da-steve101
Last active July 16, 2019 09:14
This is a modified version of https://gist.github.com/monikkinom/e97d518fe02a79177b081c028a83ec1c ... It uses only a single floating-point value as the output instead of one-hot encoding (a short sketch contrasting the two encodings follows the script below).
# Source code accompanying the blog post at http://monik.in/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow/
import numpy as np
from random import shuffle
import tensorflow as tf
NUM_EXAMPLES = 10000
# Generate all 2^20 binary strings of length 20 and shuffle them
train_input = ['{0:020b}'.format(i) for i in range(2**20)]
shuffle(train_input)
# list() is needed under Python 3, where map() returns a one-shot iterator
train_input = [list(map(int, i)) for i in train_input]
# Reshape each string into a sequence of 20 single-element vectors
ti = []
for i in train_input:
    temp_list = []
    for j in i:
        temp_list.append([j])
    ti.append(np.array(temp_list))
train_input = ti
# The target is the fraction of ones in the sequence (count / 20)
train_output = []
for i in train_input:
    count = 0
    for j in i:
        if j[0] == 1:
            count += 1
    train_output.append([count / 20.0])
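# The first NUM_EXAMPLES examples are used for training; all the rest for testing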
test_input = train_input[NUM_EXAMPLES:]
test_output = train_output[NUM_EXAMPLES:]
train_input = train_input[:NUM_EXAMPLES]
train_output = train_output[:NUM_EXAMPLES]
print( "test and training data loaded" )
data = tf.placeholder(tf.float32, [None, 20, 1])  # [batch size, sequence length, input dimension]
target = tf.placeholder(tf.float32, [None, 1])
num_hidden = 24
# Single-layer LSTM, unrolled over the 20 time steps by dynamic_rnn
cell = tf.contrib.rnn.LSTMCell(num_hidden, state_is_tuple=True)
val, _ = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)
# Keep only the output at the final time step
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))
prediction = tf.matmul(last, weight) + bias
# Despite the variable name, this is an absolute-error (L1) loss; the clip
# appears to be a holdover from the original one-hot version's cross-entropy
cross_entropy = tf.reduce_sum(tf.abs(target - tf.clip_by_value(prediction, 1e-10, 1.0)))
optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cross_entropy)
# Round the prediction to the nearest multiple of 1/20 and count exact mismatches
pred_rounded = tf.round(prediction * 20) / 20.0
mistakes = tf.not_equal(target, pred_rounded)
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
init_op = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
sess = tf.Session()
sess.run(init_op)
batch_size = 1000
no_of_batches = int(len(train_input) / batch_size )
epoch = 100
for i in range(epoch):
    ptr = 0
    for j in range(no_of_batches):
        inp = train_input[ptr:ptr + batch_size]
        out = train_output[ptr:ptr + batch_size]
        ptr += batch_size
        sess.run(minimize, {data: inp, target: out})
    print("Epoch ", str(i))
incorrect = sess.run(error, {data: test_input, target: test_output})
# Predict the fraction of ones for a sample 20-bit string (12 ones -> 0.6)
print(sess.run(prediction, {data: [[[1],[0],[0],[1],[1],[0],[1],[1],[1],[0],[1],[0],[0],[1],[1],[0],[1],[1],[1],[0]]]}))
print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))
sess.close()
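
For reference, here is a minimal sketch (not part of the original script, and assuming the original gist's 21-class one-hot over possible counts) contrasting the one-hot target encoding from monikkinom's version with the single-float encoding used above, for the same sample input fed to the trained model:

import numpy as np

bits = [1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0]  # sample 20-bit input
count = sum(bits)  # 12 ones

# One-hot target (original gist): a 21-way vector indexed by the count
one_hot = np.zeros(21)
one_hot[count] = 1.0

# Scalar target (this version): a single float in [0, 1]
scalar = [count / 20.0]  # -> [0.6]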
@ManishKV commented Jun 1, 2017

Getting the error below on line 43:
val, _ = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)

ValueError: Attempt to have a second RNNCell use the weights of a variable scope that already has weights: 'rnn/lstm_cell'; and the cell was not constructed as LSTMCell(..., reuse=True). To share the weights of an RNNCell, simply reuse it in your second calculation, or create a new one with the argument reuse=True.
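
A common cause of this error (an assumption here, since the commenter's surrounding code is not shown) is constructing the model a second time in the same default graph, for example by re-running the script in an interactive session. One fix is to reset the graph before building the cell:

import tensorflow as tf

num_hidden = 24  # as in the script above

# Start from a fresh graph so the cell's variable scope ('rnn/lstm_cell')
# is not already populated from an earlier run
tf.reset_default_graph()
cell = tf.contrib.rnn.LSTMCell(num_hidden, state_is_tuple=True)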

@tg12 commented Mar 16, 2018

Hi, would you be interested in helping me with this using forex data? ...
