Mixed precision generator
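The generator below runs its fully connected and deconvolution layers in float16 so the matrix multiplies can use Tensor Cores, while trainable variables are stored in float32 through a custom getter and batch normalization and the output sigmoid are cast back to float32 for numerical stability.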
# source: https://github.com/hwalsuklee/tensorflow-generative-model-collections/blob/master/ACGAN.py
import tensorflow as tf

# Presumably a method of the GAN class in the referenced ACGAN.py; fc, bn, deconv2d and
# concat are layer helpers defined elsewhere in that project, and
# float32_variable_storage_getter keeps variable storage in float32 (see sketch below).
def generator(self, z, y, is_training=True, reuse=False):
    with tf.variable_scope("generator", reuse=reuse, custom_getter=float32_variable_storage_getter):
        # Merge the noise vector and the code/label vector.
        z = concat([z, y], 1)
        net = fc(z, 1024, scope='g_fc1', activation_fn=None)
        # Batch normalization should be computed in float32.
        net = tf.cast(net, tf.float32)
        net = bn(net, is_training=is_training, scope='g_bn1')
        # Cast back to float16 so the fully connected weights can use Tensor Cores.
        net = tf.cast(net, tf.float16)
        net = tf.nn.relu(net)
        net = fc(net, 128 * 8 * 8, scope='g_fc2', activation_fn=None)
        # Batch normalization should be computed in float32.
        net = tf.cast(net, tf.float32)
        net = bn(net, is_training=is_training, scope='g_bn2')
        # Cast back to float16 to leverage Tensor Cores.
        net = tf.cast(net, tf.float16)
        net = tf.nn.relu(net)
        net = tf.reshape(net, [self.batch_size, 8, 8, 128])
        net = deconv2d(net, [self.batch_size, 16, 16, 64], 4, 4, 2, 2, name='g_dc3', data_type=self.dtype)
        # Batch normalization should be computed in float32.
        net = tf.cast(net, tf.float32)
        net = bn(net, is_training=is_training, scope='g_bn3')
        # Cast back to float16 to leverage Tensor Cores.
        net = tf.cast(net, tf.float16)
        net = tf.nn.relu(net)
        net = deconv2d(net, [self.batch_size, self.output_height, self.output_width, self.c_dim], 4, 4, 2, 2, name='g_dc4', data_type=self.dtype)
        # The output sigmoid should be computed in float32.
        net = tf.cast(net, tf.float32)
        out = tf.nn.sigmoid(net)
        return out
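The snippet relies on a custom variable getter so that trainable variables are stored as float32 master copies even though the layer math runs in float16. The gist does not show that getter; below is a minimal sketch following the standard pattern from NVIDIA's mixed-precision training examples. The exact name and signature are assumptions and may differ from what this gist actually used.

import tensorflow as tf

def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True, *args, **kwargs):
    """Create trainable variables in float32, then cast to the requested dtype.

    Keeping the master weights in float32 avoids losing small gradient updates
    to float16 rounding, while the cast lets the forward/backward math run in
    float16 on Tensor Cores. (Sketch of the common pattern, not taken verbatim
    from this gist.)"""
    storage_dtype = tf.float32 if trainable else dtype
    variable = getter(name, shape, dtype=storage_dtype,
                      initializer=initializer, regularizer=regularizer,
                      trainable=trainable, *args, **kwargs)
    if trainable and dtype != tf.float32:
        variable = tf.cast(variable, dtype)
    return variable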