"""Train a three-hidden-layer feed-forward classifier on features.csv (TensorFlow 1.x)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import sonnet as snt  # unused below; see the sketch after neural_net_model
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
## Load data: columns 0-50 are the 51 features, column 51 is the integer class label
data = pd.read_csv('features.csv', header=None)
X1 = data.iloc[:, 0:51]  # iloc's end-exclusive 0:51 matches the old inclusive ix[:, 0:50]
y1 = data.iloc[:, 51]
## Cast to the 32-bit types TensorFlow expects
y = y1.values.astype(np.int32)
X = X1.values.astype(np.float32)
## Replace NaNs with 0 so scaling does not propagate them
X[np.isnan(X)] = 0
## Feature Scaling and split the data into training and test sets
X_scaled = preprocessing.scale(X)
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=1)
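# Note: scaling the full matrix before the split lets test-set statistics leak into
# training. A stricter alternative (a sketch, not what this script does) would be:
#     scaler = preprocessing.StandardScaler().fit(X_train)
#     X_train, X_test = scaler.transform(X_train), scaler.transform(X_test)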
## Convert labels to one-hot format (graph tensors, evaluated once inside the session)
y_1Hot_train = tf.one_hot(y_train, 4)
y_1Hot_test = tf.one_hot(y_test, 4)
n_inputs = 51  # number of feature columns
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 4
batch_size = 100

# Placeholders fed one mini-batch at a time through feed_dict
x = tf.placeholder(tf.float32, [None, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])
def neural_net_model(data):
    # Three fully connected hidden layers with ReLU activations, linear output
    hidden_1_layer = {'weights': tf.Variable(tf.truncated_normal([n_inputs, n_nodes_hl1], stddev=0.1)),
                      'biases': tf.Variable(tf.constant(0.1, shape=[n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.truncated_normal([n_nodes_hl1, n_nodes_hl2], stddev=0.1)),
                      'biases': tf.Variable(tf.constant(0.1, shape=[n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.truncated_normal([n_nodes_hl2, n_nodes_hl3], stddev=0.1)),
                      'biases': tf.Variable(tf.constant(0.1, shape=[n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.truncated_normal([n_nodes_hl3, n_classes], stddev=0.1)),
                    'biases': tf.Variable(tf.constant(0.1, shape=[n_classes]))}

    l1 = tf.nn.relu(tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases']))
    l2 = tf.nn.relu(tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases']))
    l3 = tf.nn.relu(tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases']))
    # Linear output: raw logits for softmax_cross_entropy_with_logits, bias added after the matmul
    output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
    return output
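# Sonnet (snt) is imported above but never used. For reference, a hypothetical
# equivalent of neural_net_model built with snt.nets.MLP (a sketch assuming
# Sonnet v1 alongside TF 1.x; not wired into the graph below) might look like:
#     mlp = snt.nets.MLP([n_nodes_hl1, n_nodes_hl2, n_nodes_hl3, n_classes],
#                        activation=tf.nn.relu, activate_final=False)
#     output = mlp(data)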
def train_neural_network(x):
    prediction = neural_net_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    learning_rate = 0.00001
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    # optimizer = tf.train.GradientDescentOptimizer(0.00001).minimize(cost)
    hm_epochs = 30

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Evaluate the one-hot label tensors once, not once per batch
        y_train_oh = sess.run(y_1Hot_train)
        y_test_oh = sess.run(y_1Hot_test)
        n_batches = int(X_train.shape[0] / batch_size)

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for batch in range(n_batches):
                # Consecutive, non-overlapping mini-batches covering every batch exactly once
                begin = batch * batch_size
                end = begin + batch_size
                x_train_epoch = X_train[begin:end, :]
                y_train_epoch = y_train_oh[begin:end, :]
                _, c = sess.run([optimizer, cost], feed_dict={x: x_train_epoch, y: y_train_epoch})
                epoch_loss += c
            # correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            # accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            # print('within step accuracy', accuracy.eval({x: X_test, y: y_test_oh}))
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: X_test, y: y_test_oh}))

train_neural_network(x)
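# The trained weights live only inside the Session above and are discarded when it
# closes. To keep them, a checkpoint could be written before the session exits
# (a sketch using the standard TF 1.x saver; the path is illustrative):
#     saver = tf.train.Saver()
#     saver.save(sess, './model.ckpt')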