RNN experiment using custom exponential signal data
import tensorflow as tf
import numpy as np
import math
from mlxtend.preprocessing import one_hot
from tensorflow.python.ops import rnn, rnn_cell
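# Note: rnn / rnn_cell imported from tensorflow.python.ops and the
# tf.split(dim, num, value) argument order used below follow the pre-1.0
# TensorFlow API this gist was written against; in TensorFlow 1.x these
# modules moved (e.g. under tf.contrib.rnn) and tf.split changed its
# argument order.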
"""
Hyperparameters
"""
hm_epochs = 10    # training epochs
n_classes = 2     # tau is binned into two classes
batch_size = 10   # signals per training batch
chunk_size = 1    # features per time step
n_chunks = 1      # time steps per sample
rnn_size = 10     # LSTM hidden units
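# Shape convention used throughout: each sample fed to the network is
# (n_chunks, chunk_size), i.e. n_chunks time steps with chunk_size features
# per step. With both set to 1, each signal is a single scalar observation
# fed to the LSTM for one step.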
"""
Parameters for generating input multiple exponential signals for input
"""
lorange = 1        # smallest time constant tau
resolution = 500   # width of each tau bin (defines the class boundary)
hirange = 1000     # largest time constant tau
amplitude = np.random.uniform(-10, 10)   # one random amplitude shared by all signals
t = 100            # the single time point at which each signal is sampled
no_tau = 100       # number of (signal, label) pairs to generate
"""Input signals"""
for X in range(no_tau):
random.seed()
tau = np.array([int(math.ceil(np.random.uniform(lorange, hirange)))])
X= np.array(amplitude * np.exp(-t / tau))
X = np.reshape(X, [-1, 1,1])
print(X.shape)
"""Output labels"""
count = 0
while (count <no_tau):
tau = np.array([int(math.ceil(np.random.uniform(lorange, hirange)))])
label = np.array(one_hot([int(math.ceil(tau / resolution))]))
count = count + 1
print ('For tau value of', tau, 'label is', label)
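# A quick sketch checking the tau-to-class rule above (tau_to_class is a
# hypothetical helper mirroring the labelling expression): tau in [1, 500]
# should map to class 0 and tau in (500, 1000] to class 1.
def tau_to_class(tau_value):
    return int(math.ceil(tau_value / float(resolution))) - 1

assert tau_to_class(1) == 0 and tau_to_class(500) == 0
assert tau_to_class(501) == 1 and tau_to_class(1000) == 1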
""" Input placeholders for signal and label"""
x = tf.placeholder('float', [None,n_chunks, chunk_size])
y = tf.placeholder('float')
"""Define RNN function"""
def recurrent_neural_network(x):
layer = {'weights':tf.Variable(tf.random_normal([rnn_size,n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes]))}
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [-1, chunk_size])
x = tf.split(0, n_chunks, x)
lstm_cell = rnn_cell.BasicLSTMCell(rnn_size,state_is_tuple=True)
outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
outputs = tf.matmul(outputs[-1],layer['weights']) + layer['biases']
return outputs
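# For reference, tf.nn.dynamic_rnn (available in the same TensorFlow versions)
# accepts the (batch, n_chunks, chunk_size) tensor directly and avoids the
# transpose/reshape/split dance above. A minimal sketch of that alternative,
# not the function used below:
def recurrent_neural_network_dynamic(x):
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}
    lstm_cell = rnn_cell.BasicLSTMCell(rnn_size, state_is_tuple=True)
    outputs, states = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float32)
    last = outputs[:, -1, :]   # output at the final time step
    return tf.matmul(last, layer['weights']) + layer['biases']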
"""Training the network"""
def train_neural_network(x):
prediction = recurrent_neural_network(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(hm_epochs):
epoch_loss = 0
i = 0
while i < no_tau:
start = i
end = i + batch_size
batch_x = np.array(X[start:end])
batch_y = np.array(label[start:end])
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
epoch_loss += c
i += batch_size
print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = -tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({x: batch_x, y: batch_y}))
train_neural_network(x)
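# The accuracy above is measured on the training signals themselves. A sketch
# of a held-out check, using a hypothetical helper that repeats the generation
# loop above with fresh tau draws; feeding its output to accuracy.eval(...)
# inside the training session would give an honest estimate:
def make_dataset(n):
    sigs, labs = [], []
    for _ in range(n):
        tau = int(math.ceil(np.random.uniform(lorange, hirange)))
        sigs.append(amplitude * np.exp(-t / float(tau)))
        labs.append(int(math.ceil(tau / float(resolution))) - 1)
    return (np.reshape(np.array(sigs), [-1, n_chunks, chunk_size]),
            np.array(one_hot(labs, num_labels=n_classes)))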