Skip to content

Instantly share code, notes, and snippets.

@NathanHowell
Created February 18, 2017 01:14
Show Gist options
  • Save NathanHowell/44c932bf82fced2e04d67e8b27490be8 to your computer and use it in GitHub Desktop.
import tensorflow as tf
from scipy.special import logit
def centering_inference(c):
    """Create a frozen class-bias variable initialized to a uniform prior.

    Every entry is set to ``logit(1 / c)``, i.e. the pre-sigmoid value whose
    sigmoid is the uniform class probability ``1 / c``.

    :param c: the number of target classes
    :return: a non-trainable ``tf.Variable`` of shape ``(c,)`` named "Bias"
    """
    # logit(1/c) is computed once on the host (scipy), then broadcast into
    # a constant tensor of one value per class.
    uniform_logit = logit(1. / c)
    initial = tf.constant(
        value=uniform_logit,
        dtype=tf.float32,
        shape=(c,))
    # trainable=False: this bias is updated by its own dedicated optimizer
    # (see `optimize`), not by the model's main training step.
    return tf.Variable(
        initial_value=initial,
        name="Bias",
        trainable=False)
def centering_loss(bias, y):
    """Per-class sigmoid cross-entropy between the bias and the batch label rate.

    :param bias: bias variable allocated with :func:`centering_inference`
    :param y: actual sparse targets with a shape of [batch_size, targets]
    :return: a per-class cross-entropy loss tensor
    """
    num_classes = tf.shape(bias)[0]
    # Expand the sparse targets into a dense 0/1 indicator matrix, then
    # average over the batch axis to get each class's empirical frequency.
    indicator = tf.to_float(tf.sparse_to_indicator(y, num_classes))
    class_rates = tf.reduce_mean(indicator, 0)
    # Pull each bias logit toward the logit of its class frequency.
    return tf.nn.sigmoid_cross_entropy_with_logits(
        logits=bias,
        labels=class_rates)
def optimize(c, y):
    """Build the centering sub-graph: bias, loss, and one Adagrad update.

    :param c: the number of target classes
    :param y: actual sparse targets with a shape of [batch_size, targets]
    :return: the bias tensor, gated so that evaluating it first runs one
        optimizer step
    """
    with tf.variable_scope("Centering"):
        bias = centering_inference(c)
        loss = centering_loss(bias, y)
        step = tf.train.AdagradOptimizer(learning_rate=0.5).minimize(
            loss, var_list=[bias])
        # Gate the read on the minimize op: any fetch of the returned
        # tensor triggers exactly one update of the bias first.
        with tf.control_dependencies([step]):
            return tf.identity(bias)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment