# TensorFlow dynamic feed-forward neural network creator
# (written against the TensorFlow 1.x API; `read_data` and `LOGDIR` are
# assumed to be defined elsewhere in the project)

import tensorflow as tf

def init_weights(shape):
    """Weight initialization."""
    weights = tf.random_normal(shape, stddev=0.1)
    return tf.Variable(weights)

def init_bias(shape):
    """Create a bias variable with appropriate initialization."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
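
# Quick illustration (not part of the original gist): init_weights takes a
# shape tuple and init_bias a shape list, e.g.
#   w = init_weights((784, 128))  # 784x128 weight matrix, N(0, 0.1) entries
#   b = init_bias([128])          # length-128 bias vector filled with 0.1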

def propagate(input, weights, bias, act=None):
    """Forward-propagation through every layer."""
    activations = []
    a = None
    for i, (w, b) in enumerate(zip(weights, bias)):
        with tf.name_scope('Layer_{}'.format(i)):
            a = tf.matmul(input, w) + b
            # Activation for the last layer is not necessary (softmax is applied later)
            if act and i + 1 != len(bias):
                a = act(a)
            input = a
            activations.append(a)
    return activations, a

def get_weights(*args):
    """
    Get weight matrices and bias tensors given the layer sizes of the network.
    """
    weights = []
    biases = []
    for i in range(len(args) - 1):
        weights.append(init_weights((args[i], args[i + 1])))
        # init_bias expects a shape sequence, so wrap the size in a list
        biases.append(init_bias([args[i + 1]]))
    return weights, biases
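
# A minimal usage sketch (not part of the original gist) showing how
# get_weights and propagate compose into a full forward pass; the input size
# (784) and layer sizes below are illustrative assumptions.
def example_forward_pass():
    x = tf.placeholder(tf.float32, shape=[None, 784], name='example_input')
    weights, biases = get_weights(784, 128, 10)
    activations, logits = propagate(x, weights, biases, act=tf.nn.relu)
    # `logits` is the pre-softmax output of the final layer; `activations`
    # holds every layer's output for inspection
    return activations, logits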

def run(data_size, epochs=100, batch_size=32, beta=.1, learning_rate=.01):
    tf.reset_default_graph()
    name = 's{}_e{}_b{}_r{}_lr{}'.format(data_size, epochs, batch_size, beta, learning_rate)
    train_X, train_y, test_X, test_y, class_weights = read_data(data_size)

    # Layer sizes
    x_size = train_X.shape[1]  # Number of input nodes
    h_size = 10                # Number of hidden nodes
    layers = [x_size, h_size, len(class_weights)]

    weights = []
    bias = []
    act = tf.nn.relu
    for i in range(1, len(layers)):
        weights.append(init_weights((layers[i - 1], layers[i])))
        bias.append(init_bias([layers[i]]))
    # Data placeholders
    X = tf.placeholder("float32", shape=[None, x_size], name='input_data')
    y = tf.placeholder("int64", shape=[None], name='input_labels')

    # Forward step
    activations = []
    for i in range(len(weights)):
        with tf.name_scope('Layer_{}'.format(i)):
            # First layer consumes the input placeholder, later layers the previous output
            if i == 0:
                a = tf.matmul(X, weights[i]) + bias[i]
            else:
                a = tf.matmul(a, weights[i]) + bias[i]
            # Activation for the last layer is not necessary (softmax is applied later)
            if act and i + 1 != len(bias):
                a = act(a)
            activations.append(a)
    logits = a
    # Prediction op
    predict = tf.argmax(logits, axis=1)
    # Loss: class-weighted cross-entropy plus an L2 regularization term
    with tf.name_scope("loss"):
        reg = 0
        for i in range(len(weights)):
            reg += tf.nn.l2_loss(weights[i])
        # Weight each example's loss by the weight of its class
        example_weights = tf.gather(class_weights, y)
        loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(y, logits, example_weights)) + reg * beta
        tf.summary.scalar("loss", loss)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    # Metrics
    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(predict, y)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar("accuracy", accuracy)
    # Initialize tf variables and run SGD
    sess = tf.Session()
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess.run(init)

    # Log writers
    merge = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(LOGDIR + name + '/train/', sess.graph)
    test_writer = tf.summary.FileWriter(LOGDIR + name + '/test/', sess.graph)
    for epoch in range(epochs):
        # Train on mini-batches; NumPy slicing past the end of the array simply
        # truncates, so the final partial batch needs no special handling
        i = 0
        while i < len(train_X):
            sess.run(train_step, feed_dict={X: train_X[i: i + batch_size, :],
                                            y: train_y[i: i + batch_size]})
            i += batch_size

        summ_train, train_accuracy = sess.run([merge, accuracy], feed_dict={X: train_X, y: train_y})
        summ_test, test_accuracy = sess.run([merge, accuracy], feed_dict={X: test_X, y: test_y})
        train_writer.add_summary(summ_train, epoch)
        test_writer.add_summary(summ_test, epoch)

        if epoch % 100 == 0:
            print("Epoch = %d, train accuracy = %.2f%%" % (epoch, 100. * train_accuracy))

    train_writer.close()
    test_writer.close()
    sess.close()
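
# A minimal sketch of how the script might be driven (not part of the original
# gist); `read_data` and `LOGDIR` must be supplied by the surrounding project,
# and the data_size value below is an illustrative assumption.
if __name__ == '__main__':
    run(data_size=10000, epochs=100, batch_size=32, beta=.1, learning_rate=.01)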