import math
import tensorflow as tf

# Create an interactive TensorFlow session
sess = tf.InteractiveSession()
# Inputs for the model
# Flattened input pixels: 1296 = 36*36, the size of each image
x = tf.placeholder("float", [None, 1296])
# Known labels, one-hot encoded over 2 classes
y_ = tf.placeholder("float", [None, 2])
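# A minimal sketch (not part of the original gist) of how data for these
# placeholders might be shaped; the dummy arrays and the batch size of 32
# are illustrative assumptions only.
import numpy as np
example_images = np.zeros((32, 1296), dtype=np.float32)  # 32 flattened 36x36 images
example_labels = np.zeros((32, 2), dtype=np.float32)     # one-hot labels for 2 classes
example_feed = {x: example_images, y_: example_labels}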
# Hidden layer with 128 neurons
num_hidden = 128
# Variables: W1 holds the weights, b1 the bias of the hidden layer
W1 = tf.Variable(tf.truncated_normal([1296, num_hidden],
                                     stddev=1./math.sqrt(1296)))
b1 = tf.Variable(tf.constant(0.1, shape=[num_hidden]))
# Apply the nonlinear activation function (sigmoid) to the weighted sum,
# producing 128 intermediate values per example
h1 = tf.sigmoid(tf.matmul(x, W1) + b1)
# Output layer: logistic regression on the hidden activations
# tf.truncated_normal outputs random values from a truncated normal distribution
W2 = tf.Variable(tf.truncated_normal([num_hidden, 2],
                                     stddev=1./math.sqrt(2)))
b2 = tf.Variable(tf.constant(0.1, shape=[2]))
# Initialize all variables
sess.run(tf.global_variables_initializer())
# Define the model output: softmax over the two classes
y = tf.nn.softmax(tf.matmul(h1, W2) + b2)
# The model specification is finished; training can begin
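# A hedged sketch of a training loop that could follow this specification;
# the gist stops here, so the loss, optimizer, learning rate, and the
# next_batch helper below are assumptions, not the original author's code.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y + 1e-10))  # cross-entropy loss
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, "float"))
# for i in range(1000):
#     batch_x, batch_y = next_batch(32)  # hypothetical data helper
#     sess.run(train_step, feed_dict={x: batch_x, y_: batch_y})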