# TensorFlow 1.x script (uses tf.contrib, which was removed in TF 2.x).
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
def MinMaxScaler(data):
    # Scale each column to [0, 1]; the noise term prevents division by zero.
    numerator = data - np.min(data, 0)
    denominator = np.max(data, 0) - np.min(data, 0)
    return numerator / (denominator + 1e-7)
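# For example (hypothetical values): MinMaxScaler(np.array([[1., 10.], [3., 30.]]))
# returns approximately [[0., 0.], [1., 1.]] -- each column is scaled independently.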
timesteps = seq_length = 1
data_dim = 400
output_dim = 3

# Rows 0:1000 are the training set; columns 100:500 hold the 400 input
# features and columns 500:503 the 3 label columns.
xy = np.loadtxt('data-ar-001.csv', delimiter=',')
x = xy[0:1000, 100:500]
y = xy[0:1000, 500:503]
x = MinMaxScaler(x)

# xy1 = np.loadtxt('data-02-test-score.csv', delimiter=',')
# Rows 9900:10000 are the test set, scaled independently of the training data.
x1 = xy[9900:10000, 100:500]
y1 = xy[9900:10000, 500:503]
x1 = MinMaxScaler(x1)
dataX = []
dataY = []
dataXt = []
dataYt = []

# Wrap each row as a length-1 sequence: each sample has shape (1, 400).
for i in range(len(y)):
    _x = x[i:i + 1]
    _y = y[i]
    dataX.append(_x)
    dataY.append(_y)

for j in range(len(y1)):
    __x = x1[j:j + 1]
    __y = y1[j]
    dataXt.append(__x)
    dataYt.append(__y)

trainX = dataX
trainY = dataY
testX = dataXt
testY = dataYt
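# Sanity check (shapes implied by the slicing above): np.array(trainX) is
# (1000, 1, 400) and np.array(trainY) is (1000, 3), matching the placeholders below.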
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, 3])
hidden_dim = 3
output_dim = 50  # re-used here as the width of the dense head's input, not the 3 classes

def lstm_cell():
    return rnn.BasicLSTMCell(hidden_dim, state_is_tuple=True)

# Three stacked LSTM cells, unrolled over the length-1 sequence.
cell = rnn.MultiRNNCell([lstm_cell() for _ in range(3)], state_is_tuple=True)
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)

# Last timestep's output, projected to 50 features for the dense layers below.
rnn_features = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn=None)
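# Shape check: outputs is (batch, seq_length, hidden_dim) = (N, 1, 3),
# so outputs[:, -1] is (N, 3) and rnn_features is (N, 50).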
# tf.nn.dropout's second argument in TF 1.x is keep_prob, so this placeholder
# is a keep probability: 0.5 while training, 1.0 at evaluation time.
dropout_rate = tf.placeholder(tf.float32)
W1 = tf.Variable(tf.random_normal([50, 20]))
W2 = tf.Variable(tf.random_normal([20, 10]))
# W3 = tf.Variable(tf.random_normal([30, 20]))
# W4 = tf.Variable(tf.random_normal([20, 10]))
W5 = tf.Variable(tf.random_normal([10, 3]))
b1 = tf.Variable(tf.zeros([20]), name="Bias1")
b2 = tf.Variable(tf.zeros([10]), name="Bias2")
# b3 = tf.Variable(tf.zeros([20]), name="Bias3")
# b4 = tf.Variable(tf.zeros([10]), name="Bias4")
b5 = tf.Variable(tf.zeros([3]), name="Bias5")
with tf.name_scope("layer1") as scope:
L1=tf.nn.softmax(tf.matmul(Y_pred,W1)+b1)
L1=tf.nn.dropout(L1,dropout_rate)
with tf.name_scope("layer2") as scope:
L2=tf.nn.sigmoid(tf.matmul(L1,W2)+b2)
L2=tf.nn.dropout(L2,dropout_rate)
#with tf.name_scope("layer3") as scope:
# L3=tf.nn.relu(tf.matmul(L2,W3)+b3)
# L3=tf.nn.dropout(L3,dropout_rate)
#with tf.name_scope("layer4") as scope:
# L4=tf.nn.sigmoid(tf.matmul(L3,W4)+b4)
# L4=tf.nn.dropout(L4,dropout_rate)
with tf.name_scope("last") as scope:
Y_pred=tf.nn.softmax(tf.matmul(L2,W5)+b5)
# Cross-entropy on the softmax output; the small epsilon guards against log(0).
loss = -tf.reduce_mean(Y * tf.log(Y_pred + 1e-7))
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1)), dtype=tf.float32))
optimizer = tf.train.AdamOptimizer(0.01)
train = optimizer.minimize(loss)
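# Note: an alternative formulation would apply tf.nn.softmax_cross_entropy_with_logits
# to the pre-softmax logits, which is more numerically stable than the explicit
# log/epsilon above.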
sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(3000):
    _, l = sess.run([train, loss], feed_dict={X: trainX, Y: trainY, dropout_rate: 0.5})
    if i % 200 == 0:
        print(i, l)

# Evaluate on the held-out rows with dropout disabled (keep probability 1.0).
testPredict = sess.run(Y_pred, feed_dict={X: testX, dropout_rate: 1.0})
acc = sess.run(accuracy, feed_dict={X: testX, Y: testY, dropout_rate: 1.0})
print("\nAccuracy: ", acc)
print(np.argmax(testPredict, 1))
print(np.argmax(testY, 1))