@SandroLuck
Created May 9, 2017 23:25
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
"""
input > weights > hidden layer 1 (activation function) > weights > hidden layer 2 (activation function) > weights > output layer
compare output to intended output > cost function (cross entropy)
optimization function (optimizer) > minimize cost (AdamOptimizer... SGD, AdaGrad)
feed forward + backprop = epoch
"""
mnist= input_data.read_data_sets("/tmp/data", one_hot=True)
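# The header docstring names cross entropy as the cost function. A minimal
# worked sketch (illustration only, not part of the original gist) of what it
# computes for a single one-hot label and a softmaxed prediction:
#
#   import numpy as np
#   label = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])      # true class is 2
#   probs = np.array([0.05, 0.05, 0.7, 0.02, 0.02,
#                     0.02, 0.04, 0.04, 0.03, 0.03])       # softmax output
#   cross_entropy = -np.sum(label * np.log(probs))         # -log(0.7) ~= 0.357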
n_nodes_hl1 = 784
n_nodes_hl2 = 342
n_nodes_hl3 = 1  # note: a single-node third hidden layer is an unusually tight bottleneck
n_classes = 10
batch_size = 100
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
def neural_network_model(data):
    # (input_data * weights) + biases
    hidden_1_layer = {
        'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl1])),
    }
    hidden_2_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl2])),
    }
    hidden_3_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl3])),
    }
    output_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes])),
    }

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
    return output
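
# A minimal shape check (illustration only, assuming the layer sizes defined
# above): building the model once and feeding a dummy batch should yield
# logits of shape (batch_size, n_classes), i.e. 784 -> 784 -> 342 -> 1 -> 10.
#
#   import numpy as np
#   logits = neural_network_model(x)
#   with tf.Session() as check_sess:
#       check_sess.run(tf.global_variables_initializer())
#       out = check_sess.run(logits, feed_dict={x: np.zeros((5, 784), dtype=np.float32)})
#       print(out.shape)  # (5, 10)
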
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # default learning rate = 0.001
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    # cycles of feed forward + backprop
    hm_epochs = 1
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch:', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train_neural_network(x)
"""
one-hot: 0 = [1,0,0,0,0,0,0,0,0,0]
one-hot: 1 = [0,1,0,0,0,0,0,0,0,0]
one-hot: 2 = [0,0,1,0,0,0,0,0,0,0]
one-hot: 3 = [0,0,0,1,0,0,0,0,0,0]
one-hot: 4 = [0,0,0,0,1,0,0,0,0,0]
10 classes, 0-9
"""
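# A minimal sketch (illustration only) of producing the one-hot labels above
# with NumPy; the gist itself gets them from read_data_sets(..., one_hot=True):
#
#   import numpy as np
#   labels = np.array([0, 1, 2, 3, 4])
#   one_hot = np.eye(10)[labels]  # each row is the one-hot vector for that digit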