@tu-lee
Last active January 13, 2017 09:35
import tensorflow as tf
import numpy as np
import random
import math
from mlxtend.preprocessing import one_hot
from tensorflow.python.ops import rnn, rnn_cell  # TF 0.12-era RNN API
"""
Hyperparameters
"""
hm_epochs = 10   # training epochs
n_classes = 2    # tau below / above the class boundary
batch_size = 1
chunk_size = 1   # each sample is a single scalar
n_chunks = 1     # one time step per sample
rnn_size = 5     # LSTM hidden units
"""
Parameters for generating input multiple exponential signals for input
"""
lorange= 1
resolution= 500
hirange= 1000
amplitude= np.random.uniform(-10,10)
t = 100
no_tau = 100
"""Input signals"""
for X in range(no_tau):
random.seed()
tau = np.array([int(math.ceil(np.random.uniform(lorange, hirange)))])
X= amplitude * np.exp(-t / tau)
X = np.reshape(X, [-1, 1,1])
#print(X)
"""Output labels"""
cn = 0
class1 = [0]
class2 = [1]
while (cn <no_tau):
tau = np.array([int(math.ceil(np.random.uniform(lorange, hirange)))])
if tau<500:
label = one_hot(class1, num_labels=2)
else:
label = one_hot(class2, num_labels=2)
cn = count + 1
print ('For tau value of', tau, 'label is', label)
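# Optional sanity check (an assumption about the intended shapes, not part
# of the original gist): one signal and one one-hot label per drawn tau.
assert X.shape == (no_tau, n_chunks, chunk_size)
assert labels.shape == (no_tau, n_classes)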
""" Input placeholders for signal and label"""
x = tf.placeholder('float', [None,n_chunks, chunk_size])
y = tf.placeholder('float')
#y = tf.placeholder(tf.float32, shape = label.get_shape())
"""Define RNN function"""
def recurrent_neural_network(x):
layer = {'weights':tf.Variable(tf.random_normal([rnn_size,n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes]))}
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [-1, chunk_size])
x = tf.split(0, n_chunks, x)
lstm_cell = rnn_cell.BasicLSTMCell(rnn_size,state_is_tuple=True)
outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
output = (tf.add(tf.matmul(outputs[-1], layer['weights']), layer['biases']))
return output
"""Training the network"""
def train_neural_network(x):
prediction = recurrent_neural_network(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
optimizer = tf.train.GradientDescentOptimizer(.01).minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(hm_epochs):
epoch_loss = 0
i = 0
while i < no_tau:
start = i
end = i + batch_size
batch_x = np.array(X[start:end])
batch_y = np.array(label[start:end])
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
epoch_loss += c
i += batch_size
print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = -tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({x: batch_x, y: batch_y}))
train_neural_network(x)
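
# The RNN calls above use the TensorFlow 0.12-era API. As a hedged sketch
# (untested here, based on the TF 1.x migration changes), the rough
# equivalents under TF >= 1.0 would be:
#
#   x = tf.split(x, n_chunks, 0)   # argument order changed in TF 1.0
#   lstm_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size, state_is_tuple=True)
#   outputs, states = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)
#       # (tf.contrib.rnn.static_rnn in TF 1.0/1.1)
#   cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
#       labels=y, logits=prediction))  # keyword arguments required in TF 1.0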