eladshabi / Generator_SP_MP.py
Last active March 4, 2019 16:59
Generator SP and MP
# source: https://github.com/hwalsuklee/tensorflow-generative-model-collections/blob/master/ACGAN.py
def generator(self, z, y, is_training=True, reuse=False):
    if self.mixed:
        with tf.variable_scope("generator", reuse=reuse,
                               custom_getter=float32_variable_storage_getter):
            # merge noise and code
            z = concat([z, y], 1)
            net = fc(z, 1024, scope='g_fc1', activation_fn=None)
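The preview ends after the first fully connected layer. Since the only difference between the two precision paths is the custom getter on the variable scope (compare Gen_Sp.py and Gen_Mp.py below), the branch can plausibly collapse into a single body; a minimal sketch under that assumption, not the repo's exact code:

# Sketch only: the else branch is assumed to mirror the mixed branch
# minus the float32 custom getter (see Gen_Sp.py below).
def generator(self, z, y, is_training=True, reuse=False):
    getter = float32_variable_storage_getter if self.mixed else None
    with tf.variable_scope("generator", reuse=reuse, custom_getter=getter):
        # merge noise and code
        z = concat([z, y], 1)
        net = fc(z, 1024, scope='g_fc1', activation_fn=None)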
eladshabi / Gen_Sp.py
Last active March 4, 2019 16:57
Single precision generator
# source: https://github.com/hwalsuklee/tensorflow-generative-model-collections/blob/master/ACGAN.py
with tf.variable_scope("generator", reuse=reuse):
# merge noise and code
z = concat([z, y], 1)
net = fc(z, 1024, scope='g_fc1', activation_fn=None)
net = bn(net, is_training=is_training, scope='g_bn1')
net = tf.nn.relu(net)
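For context, a hypothetical call site for this single-precision path; batch_size, z_dim, and y_dim are assumed names, and TF 1.x placeholders are assumed throughout:

# Hypothetical usage sketch, not taken from the original repo
z = tf.placeholder(tf.float32, shape=(batch_size, z_dim))  # noise vector
y = tf.placeholder(tf.float32, shape=(batch_size, y_dim))  # class condition
fake_images = self.generator(z, y, is_training=True, reuse=False)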
eladshabi / Conv2d_MP.py
Last active March 4, 2019 16:55
Conv2d by dtype
# source: https://github.com/hwalsuklee/tensorflow-generative-model-collections/blob/master/ops.py
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d",
           data_type=tf.float32):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev),
                            dtype=data_type)
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
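The preview stops right after the convolution. The point of this gist is the data_type parameter, so the bias that DCGAN-style ops files usually add next must be created with the same dtype; a sketch of the assumed continuation:

        # Sketch: the bias uses the same data_type as the weights so the
        # whole layer runs in one precision (float16 or float32).
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=data_type)
        return tf.nn.bias_add(conv, biases)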
eladshabi / MP_optimizer.py
Last active February 25, 2019 13:07
Optimizers
# source: https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
with tf.device('/gpu:0'), \
     tf.variable_scope('fp32_storage', custom_getter=float32_variable_storage_getter):
    data, target, loss = create_simple_model(nbatch, nin, nout, dtype)
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

    # Training hyperparameters
    lr = 0.0002
    beta = 0.5
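The gist stops at the hyperparameters. The NVIDIA guide it cites completes the loop by scaling the loss before computing gradients and unscaling the gradients before the update; a sketch of that pattern (the loss_scale value and the choice of Adam are assumptions):

    loss_scale = 128.0  # assumed value; any factor that keeps small
                        # gradients representable in float16 will do
    optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=beta)
    grads = tf.gradients(loss * loss_scale, variables)  # scale the loss up
    grads = [g / loss_scale for g in grads]             # scale gradients back down
    train_op = optimizer.apply_gradients(zip(grads, variables))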
eladshabi / Gen_Mp.py
Last active February 25, 2019 12:41
Mixed precision generator
# source: https://github.com/hwalsuklee/tensorflow-generative-model-collections/blob/master/ACGAN.py
with tf.variable_scope("generator", reuse=reuse, custom_getter=float32_variable_storage_getter):
# merge noise and code
z = concat([z, y], 1)
net = fc(z, 1024, scope='g_fc1', activation_fn=None)
# Batch normalization should be calculated as type of float32
net = tf.cast(net, tf.float32)
net = bn(net, is_training=is_training, scope='g_bn1')
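Because bn runs in float32 here, the activations presumably have to be cast back down before the next half-precision layer; a sketch of the assumed next lines:

    net = tf.nn.relu(net)
    # Cast back to the compute precision (assumed float16) so the
    # following layers keep running in half precision.
    net = tf.cast(net, tf.float16)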
eladshabi / Loss_scaling.py
Last active February 25, 2019 12:39
Mixed precision loss scaling
# source: https://github.com/hwalsuklee/tensorflow-generative-model-collections/blob/master/ACGAN.py
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
q_vars = [var for var in t_vars if ('d_' in var.name) or ('c_' in var.name) or ('g_' in var.name)]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
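The preview ends at the control-dependency block, which makes the optimizers run only after the batch-norm update ops. A sketch of how loss scaling might be applied to the discriminator optimizer inside it; loss_scale, self.d_loss, and beta1 are assumed names:

    # Sketch only: scale the loss up before tf.gradients, then divide the
    # gradients by the same factor before applying them.
    d_grads = tf.gradients(self.d_loss * loss_scale, d_vars)
    d_grads = [g / loss_scale for g in d_grads]
    d_optim = tf.train.AdamOptimizer(lr, beta1=beta1).apply_gradients(zip(d_grads, d_vars))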
eladshabi / Custom getter.py
Last active February 25, 2019 12:33
Use custom getter
# source: https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True,
                                    *args, **kwargs):
    storage_dtype = tf.float32 if trainable else dtype
    variable = getter(name, shape, dtype=storage_dtype,
                      initializer=initializer, regularizer=regularizer,
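                      # The preview cuts off mid-call; the remainder below is
                      # a sketch following the NVIDIA mixed-precision guide
                      # this gist cites.
                      trainable=trainable,
                      *args, **kwargs)
    # Storage stays float32; the graph receives the variable cast back to
    # the dtype the caller requested (e.g. float16 on the compute path).
    if trainable and dtype != tf.float32:
        variable = tf.cast(variable, dtype)
    return variable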
eladshabi / Fp16_model.py
Last active February 10, 2019 14:33
Create model using float 16 data type
# source: https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
import tensorflow as tf

def create_simple_model(nbatch, nin, nout, dtype):
    """A simple softmax model."""
    data = tf.placeholder(dtype, shape=(nbatch, nin))
    weights = tf.get_variable('weights', (nin, nout), dtype)
    biases = tf.get_variable('biases', nout, dtype, initializer=tf.zeros_initializer())
    logits = tf.matmul(data, weights) + biases
    target = tf.placeholder(tf.float32, shape=(nbatch, nout))
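The preview ends at the float32 target placeholder. In the NVIDIA example this gist cites, the loss is computed in float32 regardless of the model dtype; a sketch of the closing lines:

    # Cast logits up to float32 so the softmax cross-entropy is computed
    # in full precision, then return the tensors the training loop needs.
    logits = tf.cast(logits, tf.float32)
    loss = tf.losses.softmax_cross_entropy(target, logits)
    return data, target, loss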