
@litanlitudan
Created March 3, 2021 04:04
XLA-HLO to MLIR-HLO for TF with training

How to get the XLA dump

python mnist.py

Caveat: this needs TF >= 2.4.0, since experimental_get_compiler_ir is only available from that release.
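
A quick way to check the version before running the script (a minimal sketch, not part of the original gist):

import tensorflow as tf

# experimental_get_compiler_ir requires TF >= 2.4.0.
major, minor = (int(x) for x in tf.__version__.split(".")[:2])
assert (major, minor) >= (2, 4), f"TF >= 2.4.0 required, found {tf.__version__}"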

import tensorflow as tf

# Size of each input image, 28 x 28 pixels
IMAGE_SIZE = 28 * 28
# Number of distinct number labels, [0..9]
NUM_CLASSES = 10
# Number of examples in each training batch (step)
TRAIN_BATCH_SIZE = 100
# Number of training steps to run
TRAIN_STEPS = 1000

# Loads MNIST dataset.
train, test = tf.keras.datasets.mnist.load_data()
train_ds = tf.data.Dataset.from_tensor_slices(train).batch(TRAIN_BATCH_SIZE).repeat()

# Casting from raw data to the required datatypes.
def cast(images, labels):
    images = tf.cast(
        tf.reshape(images, [-1, IMAGE_SIZE]), tf.float32)
    labels = tf.cast(labels, tf.int64)
    return (images, labels)

layer = tf.keras.layers.Dense(NUM_CLASSES)
optimizer = tf.keras.optimizers.Adam()

@tf.function(experimental_compile=True)
def train_mnist(images, labels):
    images, labels = cast(images, labels)
    with tf.GradientTape() as tape:
        predicted_labels = layer(images)
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=predicted_labels, labels=labels
        ))
    layer_variables = layer.trainable_variables
    grads = tape.gradient(loss, layer_variables)
    optimizer.apply_gradients(zip(grads, layer_variables))

images, labels = cast(test[0], test[1])
with open("mnist.xla", 'w') as f:
    dump = train_mnist.experimental_get_compiler_ir(images, labels)(stage='hlo')
    f.write(dump)
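
The stage='hlo' call above returns the HLO built from the TensorFlow graph, before XLA's optimization passes. As a sketch (assuming the 'optimized_hlo' stage offered by experimental_get_compiler_ir in TF 2.4 is available in your build), the post-optimization HLO can be dumped the same way:

# Sketch: also dump the post-optimization HLO. 'optimized_hlo' is a stage
# accepted by experimental_get_compiler_ir in TF 2.4; fall back to 'hlo'
# if your build rejects it.
optimized = train_mnist.experimental_get_compiler_ir(images, labels)(stage='optimized_hlo')
with open("mnist_optimized.xla", 'w') as f:
    f.write(optimized)

Alternatively, setting XLA_FLAGS="--xla_dump_to=/tmp/xla_dump --xla_dump_hlo_as_text" in the environment before running the script makes XLA write every compiled module, before and after optimization, to that directory.

The resulting unoptimized HLO module looks like this: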
HloModule a_inference_train_mnist_194__.192, input_output_alias={ {0}: (2, {}, may-alias), {1}: (3, {}, may-alias), {2}: (5, {}, may-alias), {3}: (8, {}, may-alias), {4}: (9, {}, may-alias), {5}: (10, {}, may-alias), {6}: (11, {}, may-alias) }
%max_float_.59 (x.60: f32[], y.61: f32[]) -> f32[] {
%x.60 = f32[] parameter(0)
%y.61 = f32[] parameter(1)
ROOT %maximum.62 = f32[] maximum(f32[] %x.60, f32[] %y.61)
}
%add_float_.69 (x.70: f32[], y.71: f32[]) -> f32[] {
%x.70 = f32[] parameter(0)
%y.71 = f32[] parameter(1)
ROOT %add.72 = f32[] add(f32[] %x.70, f32[] %y.71)
}
%add_float_.82 (x.83: f32[], y.84: f32[]) -> f32[] {
%x.83 = f32[] parameter(0)
%y.84 = f32[] parameter(1)
ROOT %add.85 = f32[] add(f32[] %x.83, f32[] %y.84)
}
%Mean-reduction.94 (x.95: f32[], y.96: f32[]) -> f32[] {
%x.95 = f32[] parameter(0)
%y.96 = f32[] parameter(1)
ROOT %add.97 = f32[] add(f32[] %x.95, f32[] %y.96)
}
%add_float_.110 (x.111: f32[], y.112: f32[]) -> f32[] {
%x.111 = f32[] parameter(0)
%y.112 = f32[] parameter(1)
ROOT %add.113 = f32[] add(f32[] %x.111, f32[] %y.112)
}
ENTRY %a_inference_train_mnist_194__.192 (arg0.1: f32[10000,784], arg1.2: s64[10000], arg2.3: f32[784,10], arg3.4: f32[10], arg4.5: f32[], arg5.6: s64[], arg6.7: f32[], arg7.8: f32[], arg8.9: f32[784,10], arg9.10: f32[784,10], arg10.11: f32[10], arg11.12: f32[10]) -> (f32[784,10], f32[10], s64[], f32[784,10], f32[784,10], f32[10], f32[10]) {
%constant.15 = f32[] constant(1), metadata={op_type="Sub" op_name="Adam/sub_2"}
%arg6.7 = f32[] parameter(6), parameter_replication={false}, metadata={op_name="XLA_Args"}
%subtract.16 = f32[] subtract(f32[] %constant.15, f32[] %arg6.7), metadata={op_type="Sub" op_name="Adam/sub_2"}
%constant.17 = f32[] constant(1), metadata={op_type="Sub" op_name="Adam/sub_3"}
%arg7.8 = f32[] parameter(7), parameter_replication={false}, metadata={op_name="XLA_Args"}
%subtract.18 = f32[] subtract(f32[] %constant.17, f32[] %arg7.8), metadata={op_type="Sub" op_name="Adam/sub_3"}
%arg4.5 = f32[] parameter(4), parameter_replication={false}, metadata={op_name="XLA_Args"}
%constant.26 = f32[] constant(1), metadata={op_type="Sub" op_name="Adam/sub"}
%arg5.6 = s64[] parameter(5), parameter_replication={false}, metadata={op_name="XLA_Args"}
%constant.19 = s64[] constant(1), metadata={op_type="AddV2" op_name="Adam/add"}
%add.20 = s64[] add(s64[] %arg5.6, s64[] %constant.19), metadata={op_type="AddV2" op_name="Adam/add"}
%convert.21 = f32[] convert(s64[] %add.20), metadata={op_type="Cast" op_name="Adam/Cast_1"}
%power.25 = f32[] power(f32[] %arg7.8, f32[] %convert.21), metadata={op_type="Pow" op_name="Adam/Pow_1"}
%subtract.27 = f32[] subtract(f32[] %constant.26, f32[] %power.25), metadata={op_type="Sub" op_name="Adam/sub"}
%sqrt.28 = f32[] sqrt(f32[] %subtract.27), metadata={op_type="Sqrt" op_name="Adam/Sqrt"}
%constant.23 = f32[] constant(1), metadata={op_type="Sub" op_name="Adam/sub_1"}
%power.22 = f32[] power(f32[] %arg6.7, f32[] %convert.21), metadata={op_type="Pow" op_name="Adam/Pow"}
%subtract.24 = f32[] subtract(f32[] %constant.23, f32[] %power.22), metadata={op_type="Sub" op_name="Adam/sub_1"}
%divide.29 = f32[] divide(f32[] %sqrt.28, f32[] %subtract.24), metadata={op_type="RealDiv" op_name="Adam/truediv"}
%multiply.30 = f32[] multiply(f32[] %arg4.5, f32[] %divide.29), metadata={op_type="Mul" op_name="Adam/mul"}
%arg1.2 = s64[10000]{0} parameter(1), parameter_replication={false}, metadata={op_name="XLA_Args"}
%reshape.14 = s64[10000]{0} reshape(s64[10000]{0} %arg1.2)
%broadcast.39 = s64[10000,10]{1,0} broadcast(s64[10000]{0} %reshape.14), dimensions={0}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%iota.38 = s64[10000,10]{1,0} iota(), iota_dimension=1, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%compare.40 = pred[10000,10]{1,0} compare(s64[10000,10]{1,0} %broadcast.39, s64[10000,10]{1,0} %iota.38), direction=EQ, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.36 = f32[] constant(1), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.41 = f32[10000,10]{1,0} broadcast(f32[] %constant.36), dimensions={}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.37 = f32[] constant(0), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.42 = f32[10000,10]{1,0} broadcast(f32[] %constant.37), dimensions={}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%select.43 = f32[10000,10]{1,0} select(pred[10000,10]{1,0} %compare.40, f32[10000,10]{1,0} %broadcast.41, f32[10000,10]{1,0} %broadcast.42), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.44 = s64[] constant(0), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.45 = s64[10000]{0} broadcast(s64[] %constant.44), dimensions={}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%compare.46 = pred[10000]{0} compare(s64[10000]{0} %broadcast.45, s64[10000]{0} %reshape.14), direction=LE, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.47 = s64[] constant(10), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.48 = s64[10000]{0} broadcast(s64[] %constant.47), dimensions={}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%compare.49 = pred[10000]{0} compare(s64[10000]{0} %reshape.14, s64[10000]{0} %broadcast.48), direction=LT, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%and.50 = pred[10000]{0} and(pred[10000]{0} %compare.46, pred[10000]{0} %compare.49), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.51 = f32[] constant(0), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.52 = f32[10000]{0} broadcast(f32[] %constant.51), dimensions={}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.53 = f32[] constant(nan), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.54 = f32[10000]{0} broadcast(f32[] %constant.53), dimensions={}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%select.55 = f32[10000]{0} select(pred[10000]{0} %and.50, f32[10000]{0} %broadcast.52, f32[10000]{0} %broadcast.54), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.56 = f32[10000,10]{1,0} broadcast(f32[10000]{0} %select.55), dimensions={0}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%add.57 = f32[10000,10]{1,0} add(f32[10000,10]{1,0} %select.43, f32[10000,10]{1,0} %broadcast.56), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%negate.78 = f32[10000,10]{1,0} negate(f32[10000,10]{1,0} %add.57), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%arg0.1 = f32[10000,784]{1,0} parameter(0), parameter_replication={false}, metadata={op_name="XLA_Args"}
%reshape.13 = f32[10000,784]{1,0} reshape(f32[10000,784]{1,0} %arg0.1)
%reshape.31 = f32[10000,784]{1,0} reshape(f32[10000,784]{1,0} %reshape.13), metadata={op_type="Reshape" op_name="Reshape"}
%arg2.3 = f32[784,10]{1,0} parameter(2), parameter_replication={false}, metadata={op_name="XLA_Args"}
%dot.32 = f32[10000,10]{1,0} dot(f32[10000,784]{1,0} %reshape.31, f32[784,10]{1,0} %arg2.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="MatMul" op_name="dense/MatMul"}
%transpose.33 = f32[10000,10]{1,0} transpose(f32[10000,10]{1,0} %dot.32), dimensions={0,1}, metadata={op_type="MatMul" op_name="dense/MatMul"}
%arg3.4 = f32[10]{0} parameter(3), parameter_replication={false}, metadata={op_name="XLA_Args"}
%broadcast.34 = f32[10000,10]{1,0} broadcast(f32[10]{0} %arg3.4), dimensions={1}, metadata={op_type="BiasAdd" op_name="dense/BiasAdd"}
%add.35 = f32[10000,10]{1,0} add(f32[10000,10]{1,0} %transpose.33, f32[10000,10]{1,0} %broadcast.34), metadata={op_type="BiasAdd" op_name="dense/BiasAdd"}
%constant.58 = f32[] constant(-inf), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%reduce.63 = f32[10000]{0} reduce(f32[10000,10]{1,0} %add.35, f32[] %constant.58), dimensions={1}, to_apply=%max_float_.59, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.64 = f32[10000,10]{1,0} broadcast(f32[10000]{0} %reduce.63), dimensions={0}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%subtract.65 = f32[10000,10]{1,0} subtract(f32[10000,10]{1,0} %add.35, f32[10000,10]{1,0} %broadcast.64), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%exponential.66 = f32[10000,10]{1,0} exponential(f32[10000,10]{1,0} %subtract.65), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%convert.67 = f32[10000,10]{1,0} convert(f32[10000,10]{1,0} %exponential.66), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.68 = f32[] constant(0), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%reduce.73 = f32[10000]{0} reduce(f32[10000,10]{1,0} %convert.67, f32[] %constant.68), dimensions={1}, to_apply=%add_float_.69, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%convert.74 = f32[10000]{0} convert(f32[10000]{0} %reduce.73), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%log.75 = f32[10000]{0} log(f32[10000]{0} %convert.74), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%broadcast.76 = f32[10000,10]{1,0} broadcast(f32[10000]{0} %log.75), dimensions={0}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%subtract.77 = f32[10000,10]{1,0} subtract(f32[10000,10]{1,0} %subtract.65, f32[10000,10]{1,0} %broadcast.76), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%multiply.79 = f32[10000,10]{1,0} multiply(f32[10000,10]{1,0} %negate.78, f32[10000,10]{1,0} %subtract.77), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%convert.80 = f32[10000,10]{1,0} convert(f32[10000,10]{1,0} %multiply.79), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%constant.81 = f32[] constant(0), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%reduce.86 = f32[10000]{0} reduce(f32[10000,10]{1,0} %convert.80, f32[] %constant.81), dimensions={1}, to_apply=%add_float_.82, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%convert.87 = f32[10000]{0} convert(f32[10000]{0} %reduce.86), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%convert.91 = f32[10000]{0} convert(f32[10000]{0} %convert.87), metadata={op_type="Mean" op_name="Mean"}
%constant.92 = f32[] constant(0), metadata={op_type="Mean" op_name="Mean"}
%convert.93 = f32[] convert(f32[] %constant.92), metadata={op_type="Mean" op_name="Mean"}
%reduce.98 = f32[] reduce(f32[10000]{0} %convert.91, f32[] %convert.93), dimensions={0}, to_apply=%Mean-reduction.94, metadata={op_type="Mean" op_name="Mean"}
%constant.99 = s32[] constant(10000), metadata={op_type="Mean" op_name="Mean"}
%convert.100 = f32[] convert(s32[] %constant.99), metadata={op_type="Mean" op_name="Mean"}
%divide.101 = f32[] divide(f32[] %reduce.98, f32[] %convert.100), metadata={op_type="Mean" op_name="Mean"}
%convert.102 = f32[] convert(f32[] %divide.101), metadata={op_type="Mean" op_name="Mean"}
%arg8.9 = f32[784,10]{1,0} parameter(8), parameter_replication={false}, metadata={op_name="XLA_Args"}
%constant.103 = f32[] constant(0.0001), metadata={op_type="Mul" op_name="gradient_tape/SparseSoftmaxCrossEntropyWithLogits/mul"}
%broadcast.104 = f32[10000,1]{1,0} broadcast(f32[] %constant.103), dimensions={}, metadata={op_type="Mul" op_name="gradient_tape/SparseSoftmaxCrossEntropyWithLogits/mul"}
%reshape.105 = f32[10000]{0} reshape(f32[10000,1]{1,0} %broadcast.104), metadata={op_type="Mul" op_name="gradient_tape/SparseSoftmaxCrossEntropyWithLogits/mul"}
%broadcast.106 = f32[10000,10]{1,0} broadcast(f32[10000]{0} %reshape.105), dimensions={0}, metadata={op_type="Mul" op_name="gradient_tape/SparseSoftmaxCrossEntropyWithLogits/mul"}
%broadcast.88 = f32[10000,10]{1,0} broadcast(f32[10000]{0} %convert.74), dimensions={0}, metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%divide.89 = f32[10000,10]{1,0} divide(f32[10000,10]{1,0} %exponential.66, f32[10000,10]{1,0} %broadcast.88), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%subtract.90 = f32[10000,10]{1,0} subtract(f32[10000,10]{1,0} %divide.89, f32[10000,10]{1,0} %add.57), metadata={op_type="SparseSoftmaxCrossEntropyWithLogits" op_name="SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"}
%multiply.107 = f32[10000,10]{1,0} multiply(f32[10000,10]{1,0} %broadcast.106, f32[10000,10]{1,0} %subtract.90), metadata={op_type="Mul" op_name="gradient_tape/SparseSoftmaxCrossEntropyWithLogits/mul"}
%dot.141 = f32[784,10]{1,0} dot(f32[10000,784]{1,0} %reshape.31, f32[10000,10]{1,0} %multiply.107), lhs_contracting_dims={0}, rhs_contracting_dims={0}, metadata={op_type="MatMul" op_name="gradient_tape/dense/MatMul"}
%transpose.142 = f32[784,10]{1,0} transpose(f32[784,10]{1,0} %dot.141), dimensions={0,1}, metadata={op_type="MatMul" op_name="gradient_tape/dense/MatMul"}
%subtract.150 = f32[784,10]{1,0} subtract(f32[784,10]{1,0} %transpose.142, f32[784,10]{1,0} %arg8.9), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%constant.144 = f32[] constant(1), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%subtract.151 = f32[] subtract(f32[] %constant.144, f32[] %arg6.7), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%broadcast.152 = f32[784,10]{1,0} broadcast(f32[] %subtract.151), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%multiply.153 = f32[784,10]{1,0} multiply(f32[784,10]{1,0} %subtract.150, f32[784,10]{1,0} %broadcast.152), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%add.154 = f32[784,10]{1,0} add(f32[784,10]{1,0} %arg8.9, f32[784,10]{1,0} %multiply.153), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%subtract.145 = f32[] subtract(f32[] %constant.144, f32[] %power.25), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%sqrt.146 = f32[] sqrt(f32[] %subtract.145), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%multiply.147 = f32[] multiply(f32[] %arg4.5, f32[] %sqrt.146), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%subtract.148 = f32[] subtract(f32[] %constant.144, f32[] %power.22), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%divide.149 = f32[] divide(f32[] %multiply.147, f32[] %subtract.148), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%broadcast.161 = f32[784,10]{1,0} broadcast(f32[] %divide.149), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%multiply.162 = f32[784,10]{1,0} multiply(f32[784,10]{1,0} %add.154, f32[784,10]{1,0} %broadcast.161), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%arg9.10 = f32[784,10]{1,0} parameter(9), parameter_replication={false}, metadata={op_name="XLA_Args"}
%multiply.155 = f32[784,10]{1,0} multiply(f32[784,10]{1,0} %transpose.142, f32[784,10]{1,0} %transpose.142), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%subtract.156 = f32[784,10]{1,0} subtract(f32[784,10]{1,0} %multiply.155, f32[784,10]{1,0} %arg9.10), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%subtract.157 = f32[] subtract(f32[] %constant.144, f32[] %arg7.8), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%broadcast.158 = f32[784,10]{1,0} broadcast(f32[] %subtract.157), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%multiply.159 = f32[784,10]{1,0} multiply(f32[784,10]{1,0} %subtract.156, f32[784,10]{1,0} %broadcast.158), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%add.160 = f32[784,10]{1,0} add(f32[784,10]{1,0} %arg9.10, f32[784,10]{1,0} %multiply.159), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%sqrt.163 = f32[784,10]{1,0} sqrt(f32[784,10]{1,0} %add.160), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%constant.143 = f32[] constant(1e-07), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%broadcast.164 = f32[784,10]{1,0} broadcast(f32[] %constant.143), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%add.165 = f32[784,10]{1,0} add(f32[784,10]{1,0} %sqrt.163, f32[784,10]{1,0} %broadcast.164), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%divide.166 = f32[784,10]{1,0} divide(f32[784,10]{1,0} %multiply.162, f32[784,10]{1,0} %add.165), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%subtract.167 = f32[784,10]{1,0} subtract(f32[784,10]{1,0} %arg2.3, f32[784,10]{1,0} %divide.166), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update/ResourceApplyAdam"}
%reshape.170 = f32[784,10]{1,0} reshape(f32[784,10]{1,0} %subtract.167), metadata={op_name="XLA_Retvals"}
%tuple.171 = (f32[784,10]{1,0}) tuple(f32[784,10]{1,0} %reshape.170), metadata={op_name="XLA_Retvals"}
%get-tuple-element.172 = f32[784,10]{1,0} get-tuple-element((f32[784,10]{1,0}) %tuple.171), index=0, metadata={op_name="XLA_Retvals"}
%arg10.11 = f32[10]{0} parameter(10), parameter_replication={false}, metadata={op_name="XLA_Args"}
%convert.108 = f32[10000,10]{1,0} convert(f32[10000,10]{1,0} %multiply.107), metadata={op_type="BiasAddGrad" op_name="gradient_tape/dense/BiasAdd/BiasAddGrad"}
%constant.109 = f32[] constant(0), metadata={op_type="BiasAddGrad" op_name="gradient_tape/dense/BiasAdd/BiasAddGrad"}
%reduce.114 = f32[10]{0} reduce(f32[10000,10]{1,0} %convert.108, f32[] %constant.109), dimensions={0}, to_apply=%add_float_.110, metadata={op_type="BiasAddGrad" op_name="gradient_tape/dense/BiasAdd/BiasAddGrad"}
%convert.115 = f32[10]{0} convert(f32[10]{0} %reduce.114), metadata={op_type="BiasAddGrad" op_name="gradient_tape/dense/BiasAdd/BiasAddGrad"}
%subtract.123 = f32[10]{0} subtract(f32[10]{0} %convert.115, f32[10]{0} %arg10.11), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%constant.117 = f32[] constant(1), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%subtract.124 = f32[] subtract(f32[] %constant.117, f32[] %arg6.7), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%broadcast.125 = f32[10]{0} broadcast(f32[] %subtract.124), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%multiply.126 = f32[10]{0} multiply(f32[10]{0} %subtract.123, f32[10]{0} %broadcast.125), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%add.127 = f32[10]{0} add(f32[10]{0} %arg10.11, f32[10]{0} %multiply.126), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%subtract.118 = f32[] subtract(f32[] %constant.117, f32[] %power.25), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%sqrt.119 = f32[] sqrt(f32[] %subtract.118), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%multiply.120 = f32[] multiply(f32[] %arg4.5, f32[] %sqrt.119), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%subtract.121 = f32[] subtract(f32[] %constant.117, f32[] %power.22), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%divide.122 = f32[] divide(f32[] %multiply.120, f32[] %subtract.121), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%broadcast.134 = f32[10]{0} broadcast(f32[] %divide.122), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%multiply.135 = f32[10]{0} multiply(f32[10]{0} %add.127, f32[10]{0} %broadcast.134), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%arg11.12 = f32[10]{0} parameter(11), parameter_replication={false}, metadata={op_name="XLA_Args"}
%multiply.128 = f32[10]{0} multiply(f32[10]{0} %convert.115, f32[10]{0} %convert.115), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%subtract.129 = f32[10]{0} subtract(f32[10]{0} %multiply.128, f32[10]{0} %arg11.12), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%subtract.130 = f32[] subtract(f32[] %constant.117, f32[] %arg7.8), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%broadcast.131 = f32[10]{0} broadcast(f32[] %subtract.130), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%multiply.132 = f32[10]{0} multiply(f32[10]{0} %subtract.129, f32[10]{0} %broadcast.131), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%add.133 = f32[10]{0} add(f32[10]{0} %arg11.12, f32[10]{0} %multiply.132), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%sqrt.136 = f32[10]{0} sqrt(f32[10]{0} %add.133), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%constant.116 = f32[] constant(1e-07), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%broadcast.137 = f32[10]{0} broadcast(f32[] %constant.116), dimensions={}, metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%add.138 = f32[10]{0} add(f32[10]{0} %sqrt.136, f32[10]{0} %broadcast.137), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%divide.139 = f32[10]{0} divide(f32[10]{0} %multiply.135, f32[10]{0} %add.138), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%subtract.140 = f32[10]{0} subtract(f32[10]{0} %arg3.4, f32[10]{0} %divide.139), metadata={op_type="ResourceApplyAdam" op_name="Adam/Adam/update_1/ResourceApplyAdam"}
%reshape.173 = f32[10]{0} reshape(f32[10]{0} %subtract.140), metadata={op_name="XLA_Retvals"}
%tuple.174 = (f32[10]{0}) tuple(f32[10]{0} %reshape.173), metadata={op_name="XLA_Retvals"}
%get-tuple-element.175 = f32[10]{0} get-tuple-element((f32[10]{0}) %tuple.174), index=0, metadata={op_name="XLA_Retvals"}
%constant.168 = s64[] constant(1), metadata={op_type="AssignAddVariableOp" op_name="Adam/Adam/AssignAddVariableOp"}
%add.169 = s64[] add(s64[] %arg5.6, s64[] %constant.168), metadata={op_type="AssignAddVariableOp" op_name="Adam/Adam/AssignAddVariableOp"}
%reshape.176 = s64[] reshape(s64[] %add.169), metadata={op_name="XLA_Retvals"}
%tuple.177 = (s64[]) tuple(s64[] %reshape.176), metadata={op_name="XLA_Retvals"}
%get-tuple-element.178 = s64[] get-tuple-element((s64[]) %tuple.177), index=0, metadata={op_name="XLA_Retvals"}
%reshape.179 = f32[784,10]{1,0} reshape(f32[784,10]{1,0} %add.154), metadata={op_name="XLA_Retvals"}
%tuple.180 = (f32[784,10]{1,0}) tuple(f32[784,10]{1,0} %reshape.179), metadata={op_name="XLA_Retvals"}
%get-tuple-element.181 = f32[784,10]{1,0} get-tuple-element((f32[784,10]{1,0}) %tuple.180), index=0, metadata={op_name="XLA_Retvals"}
%reshape.182 = f32[784,10]{1,0} reshape(f32[784,10]{1,0} %add.160), metadata={op_name="XLA_Retvals"}
%tuple.183 = (f32[784,10]{1,0}) tuple(f32[784,10]{1,0} %reshape.182), metadata={op_name="XLA_Retvals"}
%get-tuple-element.184 = f32[784,10]{1,0} get-tuple-element((f32[784,10]{1,0}) %tuple.183), index=0, metadata={op_name="XLA_Retvals"}
%reshape.185 = f32[10]{0} reshape(f32[10]{0} %add.127), metadata={op_name="XLA_Retvals"}
%tuple.186 = (f32[10]{0}) tuple(f32[10]{0} %reshape.185), metadata={op_name="XLA_Retvals"}
%get-tuple-element.187 = f32[10]{0} get-tuple-element((f32[10]{0}) %tuple.186), index=0, metadata={op_name="XLA_Retvals"}
%reshape.188 = f32[10]{0} reshape(f32[10]{0} %add.133), metadata={op_name="XLA_Retvals"}
%tuple.189 = (f32[10]{0}) tuple(f32[10]{0} %reshape.188), metadata={op_name="XLA_Retvals"}
%get-tuple-element.190 = f32[10]{0} get-tuple-element((f32[10]{0}) %tuple.189), index=0, metadata={op_name="XLA_Retvals"}
ROOT %tuple.191 = (f32[784,10]{1,0}, f32[10]{0}, s64[], f32[784,10]{1,0}, f32[784,10]{1,0}, f32[10]{0}, f32[10]{0}) tuple(f32[784,10]{1,0} %get-tuple-element.172, f32[10]{0} %get-tuple-element.175, s64[] %get-tuple-element.178, f32[784,10]{1,0} %get-tuple-element.181, f32[784,10]{1,0} %get-tuple-element.184, f32[10]{0} %get-tuple-element.187, f32[10]{0} %get-tuple-element.190), metadata={op_name="XLA_Retvals"}
}
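To lower this XLA HLO text into MLIR-HLO, one option is the MLIR translation tool built from the TensorFlow tree (binary and flag names vary between versions, so treat this as a sketch rather than the exact invocation):

tf-mlir-translate --hlo-text-to-mlir-hlo mnist.xla -o mnist.mlir

The resulting module uses the mhlo dialect and can then be processed with the MLIR-HLO tooling or fed into downstream MLIR pipelines.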