@tu-lee
Created January 19, 2017 17:33
import tensorflow as tf
import numpy as np
import random
import math
from mlxtend.preprocessing import one_hot
from tensorflow.python.ops import rnn, rnn_cell
from random import shuffle
"""
Hyperparameters
"""
hm_epochs = 100
n_classes = 2
batch_size = 10
chunk_size = 1
n_chunks = 1
rnn_size = 5
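# Note (added for clarity): with n_chunks = 1 and chunk_size = 1, each example
# reaches the LSTM as a length-1 sequence of scalars, i.e. the network sees a
# single time step per sample.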
"""
Parameters for generating input multiple exponential signals for input
"""
lorange= 1
resolution= 500
hirange= 1000
amplitude= np.random.uniform(-10,10)
t = 100
no_tau = 100
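# Illustration (added, not part of the original gist): each example is the
# decay amplitude * exp(-t / tau) evaluated at the single time point t, and
# the class depends on whether tau falls below 500. For instance:
sample_fast = amplitude * np.exp(-float(t) / 250)  # tau = 250 -> class 0
sample_slow = amplitude * np.exp(-float(t) / 750)  # tau = 750 -> class 1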
"""Input signals"""
count = 0
class1 = [0]
class2 = [1]
train_input = []
train_output = []
while count < no_tau:
    tau = int(math.ceil(np.random.uniform(lorange, hirange)))
    count += 1
    # Single sample of the decay at time t; the float cast avoids integer
    # division under Python 2.
    X = amplitude * np.exp(-float(t) / tau)
    X = np.reshape(X, [-1, 1, 1])
    # Decays with tau < 500 are class 0, slower decays are class 1.
    if tau < 500:
        label = one_hot(class1, num_labels=2)
    else:
        label = one_hot(class2, num_labels=2)
    train_input.append(X)
    train_output.append(label)
print(train_input)
print(train_output)
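# shuffle is imported above but never used; presumably the intent was to
# randomise sample order before batching. A minimal sketch of that step
# (an assumption, not in the original gist):
combined = list(zip(train_input, train_output))
shuffle(combined)
train_input, train_output = [list(s) for s in zip(*combined)]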
"""testing data"""
ct = 0
class1 = [0]
class2 = [1]
test_input = []
test_output = []
while (count<no_tau):
tau =np.array(int(math.ceil(np.random.uniform(1000, 2000))))
count = count+1
X1 = amplitude * np.exp(-t / tau)
X1 = np.reshape(X, [-1, 1,1])
if tau < 500:
label = one_hot(class1, num_labels=2)
else:
label = one_hot(class2, num_labels=2)
test_input.append(X)
#train_input = np.reshape(train_input, [-1, 1,1])
test_output.append(label)
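# Shape sanity check (illustrative addition): every input should be a
# (1, 1, 1) array and every label a (1, 2) one-hot row.
assert len(test_input) == no_tau and test_input[0].shape == (1, 1, 1)
assert test_output[0].shape == (1, 2)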
""" Input placeholders for signal and label"""
x = tf.placeholder('float', [None,n_chunks, chunk_size])
y = tf.placeholder('float')
"""Define RNN function"""
def recurrent_neural_network(x):
layer = {'weights':tf.Variable(tf.random_normal([rnn_size,n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes]))}
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [-1, chunk_size])
x = tf.split(0, n_chunks, x)
lstm_cell = rnn_cell.BasicLSTMCell(rnn_size,state_is_tuple=True)
outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
output = (tf.add(tf.matmul(outputs[-1], layer['weights']), layer['biases']))
return output
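# Note: the function above uses the pre-1.0 TensorFlow API. Under TF 1.x the
# equivalent calls would be (an untested sketch, not from the original gist):
#   x = tf.split(x, n_chunks, 0)  # new argument order
#   lstm_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size, state_is_tuple=True)
#   outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)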
"""Training the network"""
def train_neural_network(x):
prediction = recurrent_neural_network(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(hm_epochs):
epoch_loss = 0
i = 0
while i < no_tau:
start = i
end = i + batch_size
batch_x = np.array(train_input[start:end])
batch_x = np.reshape(batch_x, [-1, 1, 1])
batch_y = np.array(train_output[start:end])
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
epoch_loss += c
i += batch_size
print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = -tf.reduce_mean(tf.cast(correct, 'float'))
print(accuracy)
#batch_y = np.reshape(batch_y, [10, 2])
print('Accuracy:', accuracy.eval({x: batch_x, y: batch_y}))
train_neural_network(x)
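# Follow-up sketch (assumption, not in the original gist): to classify a new
# decay after training, the session and `prediction` would have to stay in
# scope instead of closing inside train_neural_network, e.g.:
#   sample = np.reshape(amplitude * np.exp(-float(t) / 300), [-1, 1, 1])
#   print(sess.run(tf.argmax(prediction, 1), feed_dict={x: sample}))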
"""
Output labels
to = []
count = 0
class1 = [0]
class2 = [1]
while (count < no_tau):
tau = np.array([int(math.ceil(np.random.uniform(lorange, hirange)))])
if tau<500:
label = one_hot(class1, num_labels=2)
else:
label = one_hot(class2, num_labels=2)
count = count + 1
label.append(to)
"""