Skip to content

Instantly share code, notes, and snippets.

@dekromp
Created March 28, 2018 16:35
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Save dekromp/d0742f0280576a8d567887dea5fc3572 to your computer and use it in GitHub Desktop.
import numpy as np
import tensorflow as tf

# Build the model architecture: logistic regression over the concatenation
# of two input placeholders (5 + 10 = 15 features).
input1 = tf.placeholder(tf.float32, [None, 5], name='input1')
input2 = tf.placeholder(tf.float32, [None, 10], name='input2')
conc = tf.concat([input1, input2], axis=-1)  # shape [batch, 15]
# Small random init keeps the initial logits near zero.
weights = tf.get_variable(
    'weights',
    initializer=np.random.normal(0, 0.01, size=(15, 1)).astype(np.float32))
bias = tf.get_variable('bias', initializer=tf.zeros(1))
logits = tf.reshape(tf.matmul(conc, weights) + bias, [-1])  # shape [batch]
output = tf.nn.sigmoid(logits)

# Set the learning objective: mean sigmoid cross-entropy against binary
# targets, computed from logits for numerical stability.
target = tf.placeholder(tf.float32, [None], name='target')
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=target, logits=logits))

# Set the gradient descent update rule; the learning rate is fed at run time.
learning_rate = tf.placeholder(tf.float32, [], name='learning_rate')
update_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Generate some random data: 10 examples with binary labels.
x1 = np.random.uniform(-1, 1, size=(10, 5)).astype(np.float32)
x2 = np.random.uniform(-1, 1, size=(10, 10)).astype(np.float32)
y = np.random.choice([0, 1], size=10).astype(np.float32)

# Start the tensorflow session and initialize the model parameters.
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    # Update the model for 10 iterations, printing the pre-update loss.
    # NOTE: range/print() replace Python-2-only xrange/print statement.
    for i in range(10):
        train_error = session.run(
            [loss, update_op],
            feed_dict={input1: x1, input2: x2, target: y, learning_rate: 0.01})
        print('Training Error: %.4f' % train_error[0])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment