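# Graph definition for a Temporal Convolutional Network (TCN) classifier.
# `TemporalConvNet` is assumed to be defined/imported from the accompanying
# TCN implementation (a callable returning features shaped
# [batch, timesteps, n_channels]). The hyperparameter assignments below are
# an illustrative sketch only, not the gist author's settings.
import numpy as np
import tensorflow as tf

timesteps = 784       # hypothetical sequence length (e.g. sequential MNIST)
num_input = 1         # hypothetical number of features per time step
num_classes = 10      # hypothetical number of classes
nhid, levels = 25, 8  # hypothetical TCN channel width and number of levels
kernel_size = 7       # hypothetical convolution kernel size
dropout = 0.1         # hypothetical dropout probability
learning_rate = 1e-3  # hypothetical learning rate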
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
    tf.set_random_seed(10)
    # tf Graph input
    X = tf.placeholder("float", [None, timesteps, num_input])
    Y = tf.placeholder("float", [None, num_classes])
    is_training = tf.placeholder("bool")
    # Run the TCN over the input sequence, keep the features of the last
    # time step, and project them to class logits with a dense layer
    logits = tf.layers.dense(
        TemporalConvNet([nhid] * levels, kernel_size, dropout)(
            X, training=is_training)[:, -1, :],
        num_classes, activation=None,
        kernel_initializer=tf.orthogonal_initializer()
    )
    prediction = tf.nn.softmax(logits)
    # Define loss and optimizer
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=Y))
    with tf.name_scope("optimizer"):
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        # Optional gradient clipping (kept disabled here):
        # gvs = optimizer.compute_gradients(loss_op)
        # for grad, var in gvs:
        #     if grad is None:
        #         print(var)
        # capped_gvs = [(tf.clip_by_value(grad, -.5, .5), var)
        #               for grad, var in gvs]
        # train_op = optimizer.apply_gradients(capped_gvs)
        train_op = optimizer.minimize(loss_op)
    # Evaluate model (feed is_training=False so that dropout is disabled)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    print("All parameters:", np.sum(
        [np.product([xi.value for xi in x.get_shape()])
         for x in tf.global_variables()]))
    print("Trainable parameters:", np.sum(
        [np.product([xi.value for xi in x.get_shape()])
         for x in tf.trainable_variables()]))