# Gist by @Millsky, created April 14, 2016
import tensorflow as tf
import numpy as np
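# Trains a small fully-connected network (2 -> 2 -> 4 -> 4 -> 1) to regress the
# function y = x0**2 + x1, using Adagrad with an exponentially decaying learning rate.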
# NumPy sigmoid helper; defined but never called (the graph uses tf.nn.sigmoid below)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# Hand-written examples of the target function y = x0**2 + x1
# (kept for reference; training below uses the generated x_learn2/y_learn2 instead)
x_learn = np.array([[1.0, 2.0],
                    [10.0, 2.0],
                    [9.0, 3.0],
                    [5.0, 8.0],
                    [2.0, 8.0],
                    [1.0, 8.0],
                    [3.0, 8.0],
                    [11.0, 3.0],
                    [12.0, 1.0],
                    [5.0, 30.0],
                    [-10.0, 8.0],
                    [8.0, 8.0],
                    [2.0, 11.0],
                    [10.0, 2.0],
                    [9.0, 3.0],
                    [5.0, 8.0],
                    [2.0, 8.0],
                    [1.0, 100.0],
                    [1.0, 8.0],
                    [11.0, 3.0],
                    [12.0, 1.0],
                    [6.0, 30.0],
                    [3.0, 7.5],
                    [8.0, 9.0],
                    [10.0, 5.0]],
                   dtype=np.float32)
# Generated training set: 2000 points sampled uniformly from [-1, 9)^2,
# labelled with the same target function y = x0**2 + x1
x_learn2 = 10 * np.random.random((2000, 2)) - 1
y_learn2 = np.power(x_learn2.T[0], 2) + x_learn2.T[1]
y_learn2 = y_learn2.reshape(2000, 1)
# Labels for x_learn above (also unused during training)
y_learn = np.array([[3.0],
                    [102.0],
                    [84.0],
                    [33.0],
                    [12.0],
                    [9.0],
                    [17.0],
                    [124.0],
                    [145.0],
                    [55.0],
                    [108.0],
                    [72.0],
                    [15.0],
                    [102.0],
                    [84.0],
                    [33.0],
                    [12.0],
                    [101.0],
                    [9.0],
                    [124.0],
                    [145.0],
                    [66.0],
                    [16.5],
                    [73.0],
                    [105.0]],
                   dtype=np.float32)
# Held-out test points and their labels (same target function)
x_test = np.array([[1, 3], [8, 10], [9, 3], [5, 8], [0, 8]], dtype=np.float32)
y_test = np.array([[4], [74], [84], [33], [8]], dtype=np.float32)
# Starting learning rate for the decay schedule below
slearning_rate = .79966432
# Placeholders for a batch of inputs and targets
x = tf.placeholder(tf.float32, [None, 2])
y = tf.placeholder(tf.float32, [None, 1])
weights = {
    'h1': tf.Variable(10 * tf.random_normal([2, 2], dtype=tf.float32)),
    'h2': tf.Variable(2 * tf.random_normal([2, 4], dtype=tf.float32)),
    'h3': tf.Variable(tf.random_normal([4, 4], dtype=tf.float32)),
    'h4': tf.Variable(tf.random_normal([4, 1], dtype=tf.float32))
}
biases = {
    'b1': tf.Variable(10 * tf.random_normal([1, 2], dtype=tf.float32)),
    'b2': tf.Variable(2 * tf.random_normal([1, 4], dtype=tf.float32)),
    'b3': tf.Variable(tf.random_normal([1, 4], dtype=tf.float32)),
    'b4': tf.Variable(tf.random_normal([1], dtype=tf.float32))
}
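# Forward pass: two tanh hidden layers and one sigmoid hidden layer, followed by
# a linear output layer. Note the first two layers above use deliberately larger
# random initializations (scaled by 10 and 2) than the later layers.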
def passForward(_X, _weights, _biases):
    layer_1 = tf.nn.tanh(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
    layer_2 = tf.nn.tanh(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, _weights['h3']), _biases['b3']))
    layer_4 = tf.matmul(layer_3, _weights['h4']) + _biases['b4']
    return layer_4
global_step = tf.Variable(0, trainable=False)
# Build the forward pass once and reuse it for the loss and the evaluations below
pred = passForward(x, weights, biases)
# tf.nn.l2_loss computes 0.5 * the sum of squared errors over the batch
lossFunc = tf.nn.l2_loss(pred - y)
# Decay the learning rate by a factor of 0.92 every 10000 steps
learning_rate = tf.train.exponential_decay(slearning_rate, global_step, 10000, 0.92, staircase=True)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(lossFunc, global_step=global_step)
# tf.initialize_all_variables() is deprecated in TF 1.x; use its replacement
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
for j in range(200000):
    sess.run(train_step, feed_dict={x: x_learn2, y: y_learn2})
    if j % 2000 == 0:
        # Report the average per-example loss and the current (decayed) learning rate
        print(lossFunc.eval({x: x_learn2, y: y_learn2}) / 2000)
        print(learning_rate.eval())
# Predictions on the held-out test points, fed through the placeholder...
print(pred.eval({x: x_test}))
# print(y_learn2)
# ...and the same forward pass built directly on the constant test inputs
print(passForward(x_test, weights, biases).eval())
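
# A possible sanity check (not in the original gist): mean absolute error of the
# predictions against the y_test labels defined above, which the script
# otherwise never uses.
print(np.mean(np.abs(pred.eval({x: x_test}) - y_test)))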