@nlintz
Created November 13, 2015 22:29
import tensorflow as tf
import numpy as np

# Parameters
learning_rate = 0.001
decay = .9            # unused in this script
training_epochs = 15  # unused; the loop below runs a fixed 2000 iterations
batch_size = 100      # unused; the full 4-example XOR set is fed each step
display_step = 100    # print the cost every 100 iterations
# Network parameters
n_hidden_1 = 4  # first hidden layer size
n_hidden_2 = 4  # second hidden layer size
n_input = 2     # two binary inputs
n_output = 1    # one binary output

# XOR truth table: output is 1 iff exactly one input is 1
trX = [[0., 0.], [1., 0.], [0., 1.], [1., 1.]]
trY = [[0.], [1.], [1.], [0.]]
teX = trX  # with only four possible inputs, test on the training set
teY = trY
# Create model
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_output])
def multilayer_perceptron(_X, _weights, _biases):
    # Two sigmoid hidden layers followed by a linear output layer
    layer_1 = tf.nn.sigmoid(tf.matmul(_X, _weights['h1']) + _biases['b1'])
    layer_2 = tf.nn.sigmoid(tf.matmul(layer_1, _weights['h2']) + _biases['b2'])
    return tf.matmul(layer_2, _weights['out']) + _biases['out']
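
# Shapes through the network for a batch of N examples:
#   input [N, 2] -> layer_1 [N, 4] -> layer_2 [N, 4] -> output [N, 1]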
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_output]))
}
pred = multilayer_perceptron(x, weights, biases)

# Mean squared error, reduced to a scalar for the optimizer
cost = tf.reduce_mean(tf.pow(pred - y, 2))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
with tf.Session() as sess:
    init = tf.initialize_all_variables()
    sess.run(init)
    for i in range(2000):
        # Train on the full XOR set each iteration (only four examples)
        sess.run(optimizer, feed_dict={x: trX, y: trY})
        avg_cost = sess.run(cost, feed_dict={x: trX, y: trY})
        if i % display_step == 0:
            print("Epoch: %d cost=%f" % (i + 1, avg_cost))
    print("Optimization Finished!")
    print(sess.run(pred, feed_dict={x: teX, y: teY}))
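
For readers on current TensorFlow: the 0.x API above (placeholders, tf.Session, tf.initialize_all_variables) was removed in TensorFlow 2.x. A rough equivalent of the same 2-4-4-1 XOR network using tf.keras might look like the sketch below; the layer sizes, sigmoid hidden units, MSE loss, and Adam learning rate mirror the script, while everything else (treating fit() epochs as the 2000 iterations, the Sequential structure) is an assumption, not the original author's code.

# Hedged sketch: the same XOR MLP in TensorFlow 2.x / tf.keras.
import tensorflow as tf

trX = [[0., 0.], [1., 0.], [0., 1.], [1., 1.]]
trY = [[0.], [1.], [1.], [0.]]

model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation="sigmoid", input_shape=(2,)),
    tf.keras.layers.Dense(4, activation="sigmoid"),
    tf.keras.layers.Dense(1),  # linear output, matching the script above
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              loss="mse")
model.fit(trX, trY, epochs=2000, verbose=0)
print(model.predict(trX))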