TensorFlow network for curve fitting (gist by @nomanahmedsheikh, created October 6, 2018)
'''
References used for learning
https://github.com/aymericdamien/TensorFlow-Examples
https://datascience.stackexchange.com/questions/20058/tensorflow-regression-using-deep-neural-network
'''
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
size = 1000

def generate_data(size):
    # Sample x uniformly in [0, 1) and evaluate the target quadratic.
    x = np.random.rand(size)
    y = 8 * (x**2) + 8 * x + 5
    # Return x and y as column vectors of shape (size, 1).
    return np.transpose([x]), np.transpose([y])

vector_X, vector_Y = generate_data(size)
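# Quick sanity check (added for illustration, not part of the original gist):
# the placeholders below expect rank-2 inputs, one sample per row.
assert vector_X.shape == (size, 1)
assert vector_Y.shape == (size, 1)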
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 1
num_classes = 1
# Parameters
learning_rate = 0.0001
num_steps = 400
batch_size = 128  # defined but unused: the loop below trains on the full dataset each step
display_step = 10
# tf Graph input
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([num_classes]))
}
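# Note (an aside, not in the original gist): tf.random_normal defaults to
# stddev=1.0, which is a fairly large initialization for ReLU layers; a
# smaller scale is a common variation, e.g.:
#   tf.Variable(tf.random_normal([num_input, n_hidden_1], stddev=0.1))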
# Create model
def neural_net(x):
    # Hidden fully connected layer with 256 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden fully connected layer with 256 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output fully connected layer with a single linear unit (the regression output)
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Construct model
prediction = neural_net(X)
# Define cost and optimizer
loss = tf.reduce_mean(tf.square(prediction - Y))  # mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
plotValues = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1, num_steps + 1):
        # Run one optimization step on the full dataset and record the loss.
        loss_val, _ = sess.run([loss, optimizer], feed_dict={X: vector_X, Y: vector_Y})
        plotValues.append(loss_val)
        if step % display_step == 0 or step == 1:
            print("Step {}, Loss = {:.4f}".format(step, loss_val))
    print("Optimization Finished!")
plt.plot(plotValues)
plt.xlabel("Iterations")
plt.ylabel("Cost")
plt.show()
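# Sketch of a follow-up plot (assumes grid_x and fitted_y were computed inside
# the session above): overlay the learned function on the true curve
# y = 8x^2 + 8x + 5 to see how well the network fits it.
plt.plot(grid_x, 8 * grid_x**2 + 8 * grid_x + 5, label="true curve")
plt.plot(grid_x, fitted_y, label="network fit")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.show()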