@ManfeiBai
Created August 5, 2024 21:46
Dump HLO for changeable weight/bias in while_loop
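The session below runs WhileLoopTest.test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean on TPU with graph dumping enabled, so every lazy-tensor sync writes its HLO to the save file. The point of the test is that a linear layer's weight and bias live outside the loop body but are threaded through the loop carry, so the lowered XLA while-loop receives them as mutable parameters instead of baking them in as constants. A minimal sketch of that carried-state pattern, using the public while_loop higher-order op (hypothetical example, not the actual test code):

# Sketch only: carry a counter plus tensor state through while_loop so the
# state is lowered as parameters of the XLA while-loop, not as constants.
import torch
import torch_xla.core.xla_model as xm
from torch._higher_order_ops.while_loop import while_loop

device = xm.xla_device()

def cond_fn(it, state):
    return it >= 0             # keep looping while the counter is non-negative

def body_fn(it, state):
    return it - 1, state + 1.0  # decrement counter, rewrite the carried state

it = torch.tensor(3, dtype=torch.int32, device=device)
state = torch.zeros(2, 2, device=device)
it, state = while_loop(cond_fn, body_fn, (it, state))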
(torch310) root@b7b12c30e894:/pytorch/xla# ls /tmp
(torch310) root@b7b12c30e894:/pytorch/xla# XLA_IR_DEBUG=1 XLA_HLO_DEBUG=1 XLA_SAVE_TENSORS_FMT="hlo" XLA_SAVE_TENSORS_FILE="/tmp/save1.hlo" XLA_FLAGS=--xla_dump_to=/src/repo/hlogeneratedwhengenerateir PJRT_DEVICE=TPU python test/test_while_loop.py WhileLoopTest.test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean
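XLA_IR_DEBUG=1 and XLA_HLO_DEBUG=1 attach the Python source metadata (source_file/source_line) visible in the dumps below, and XLA_SAVE_TENSORS_FMT="hlo" with XLA_SAVE_TENSORS_FILE saves every synced graph. Note that the save file is written with a per-process ordinal suffix, which is why /tmp/save1.hlo.0 exists after the run while the bare /tmp/save1.hlo path does not.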
inputs: tensor([[[ 5.1000,  6.2000],
                 [ 7.3000,  8.4000]],

                [[16.0000, 17.0000],
                 [ 0.0000,  0.0000]],

                [[ 1.0000,  2.0000],
                 [ 3.0000,  4.0000]]], device='xla:0')
res: tensor([[[1.0000e+00, 2.0000e+00],
              [3.0000e+00, 4.0000e+00]],

             [[1.0000e+00, 2.0000e+00],
              [0.0000e+00, 0.0000e+00]],

             [[6.4950e+02, 1.4155e+03],
              [1.1230e+03, 2.4480e+03]]], device='xla:0')
final expected: tensor([[[   5.1000,    6.2000],
                         [   7.3000,    8.4000]],

                        [[  16.0000,   17.0000],
                         [   0.0000,    0.0000]],

                        [[ 649.5000, 1415.5000],
                         [1123.0000, 2448.0000]]], device='xla:0')
.
----------------------------------------------------------------------
Ran 1 test in 3.567s
OK
(torch310) root@b7b12c30e894:/pytorch/xla# cat /tmp/save1.hlo
cat: /tmp/save1.hlo: No such file or directory
(torch310) root@b7b12c30e894:/pytorch/xla# cat /tmp/save1.hlo.0
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
_str_intern (/pytorch/torch/_tensor_str.py:432)
_str (/pytorch/torch/_tensor_str.py:697)
__repr__ (/pytorch/torch/_tensor.py:462)
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:134)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (105673256fa2efa655e5b7fab0edb22e)
## BEGIN_GRAPH
HloModule IrToHlo.13, entry_computation_layout={(f32[2,2]{1,0}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1})->(f32[3,2,2]{2,1,0})}
ENTRY %IrToHlo.13 (p0.1: f32[2,2], p1.2: f32[3,2,2], p2.5: f32[3,2,2]) -> (f32[3,2,2]) {
%p2.5 = f32[3,2,2]{0,2,1} parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=105}
%slice.6 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p2.5), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
%reshape.7 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.6), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
%reshape.8 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.7), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
%p1.2 = f32[3,2,2]{0,2,1} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=109}
%slice.3 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p1.2), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
%reshape.4 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.3), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
%reshape.9 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.4), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
%p0.1 = f32[2,2]{1,0} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=131}
%reshape.10 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %p0.1), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
%concatenate.11 = f32[3,2,2]{2,1,0} concatenate(f32[1,2,2]{2,1,0} %reshape.8, f32[1,2,2]{2,1,0} %reshape.9, f32[1,2,2]{2,1,0} %reshape.10), dimensions={0}, metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=130}
ROOT %tuple.12 = (f32[3,2,2]{2,1,0}) tuple(f32[3,2,2]{2,1,0} %concatenate.11)
}
Graph Hash: b65067cc0ea4618fc73629a00f96d74f
## END_GRAPH
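This first graph (IrToHlo.13) is traced when the test prints a tensor at test_while_loop.py:134 (the __repr__ frames above): it just rebuilds the f32[3,2,2] value by slicing two device tensors and stacking the pieces with a third, hence the xla__generic_slice / aten__view / aten__stack ops.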
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
body_fn (/pytorch/xla/test/test_while_loop.py:127)
new_body_fn (/pytorch/xla/torch_xla/experimental/fori_loop.py:52)
_xla_while_loop (/pytorch/xla/torch_xla/experimental/fori_loop.py:107)
_xla_while_loop_wrapper (/pytorch/xla/torch_xla/experimental/fori_loop.py:61)
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:136)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (680b8b38cacabc0ba280595d3650ad01)
## BEGIN_GRAPH
HloModule IrToHlo.12, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.12 (p0.5: s32[]) -> (s32[]) {
%p0.5 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%multiply.6 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.1/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%subtract.7 = s32[] subtract(s32[] %p0.5, s32[] %multiply.6), metadata={op_type="aten__sub" op_name="aten__sub.1/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%negate.8 = s32[] negate(s32[] %subtract.7), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%multiply.9 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.2/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%subtract.10 = s32[] subtract(s32[] %negate.8, s32[] %multiply.9), metadata={op_type="aten__sub" op_name="aten__sub.2/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
ROOT %tuple.11 = (s32[]) tuple(s32[] %subtract.10)
}
Graph Hash: 4bc622aece4fc977dddcce298b2c7ab9
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
body_fn (/pytorch/xla/test/test_while_loop.py:127)
new_body_fn (/pytorch/xla/torch_xla/experimental/fori_loop.py:52)
_xla_while_loop (/pytorch/xla/torch_xla/experimental/fori_loop.py:107)
_xla_while_loop_wrapper (/pytorch/xla/torch_xla/experimental/fori_loop.py:61)
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:136)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (680b8b38cacabc0ba280595d3650ad01)
## BEGIN_GRAPH
HloModule IrToHlo.12, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.12 (p0.5: s32[]) -> (s32[]) {
%p0.5 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%multiply.6 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.1/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%subtract.7 = s32[] subtract(s32[] %p0.5, s32[] %multiply.6), metadata={op_type="aten__sub" op_name="aten__sub.1/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%negate.8 = s32[] negate(s32[] %subtract.7), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%multiply.9 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.3/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%subtract.10 = s32[] subtract(s32[] %negate.8, s32[] %multiply.9), metadata={op_type="aten__sub" op_name="aten__sub.3/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
ROOT %tuple.11 = (s32[]) tuple(s32[] %subtract.10)
}
Graph Hash: 4bc622aece4fc977dddcce298b2c7ab9
## END_GRAPH
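The two IrToHlo.12 dumps above are back-to-back traces of the same counter update inside body_fn; they share Graph Hash 4bc622aece4fc977dddcce298b2c7ab9 and differ only in their aten__sub op_name counters.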
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
_str_intern (/pytorch/torch/_tensor_str.py:432)
_str (/pytorch/torch/_tensor_str.py:697)
__repr__ (/pytorch/torch/_tensor.py:462)
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:139)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (bd7fd91ffb6352e9eb19b8b9d106b8c8)
## BEGIN_GRAPH
HloModule IrToHlo.73, entry_computation_layout={(f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, s32[])->(f32[3,2,2]{2,1,0})}
%PyLoweringContext.5 (p0.8: s32[], p1.11: f32[3,2,2], p2.12: f32[3,2,2], p3.13: f32[3,2,2]) -> (s32[], f32[3,2,2], f32[3,2,2], f32[3,2,2]) {
%p0.8 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.7 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%constant.6 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%multiply.9 = s32[] multiply(s32[] %constant.7, s32[] %constant.6), metadata={op_type="aten__sub" op_name="aten__sub.1/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%subtract.10 = s32[] subtract(s32[] %p0.8, s32[] %multiply.9), metadata={op_type="aten__sub" op_name="aten__sub.1/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=125}
%p1.11 = f32[3,2,2]{0,2,1} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=105}
%p2.12 = f32[3,2,2]{0,2,1} parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=109}
%slice.31 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p1.11), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%reshape.32 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.31), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%reshape.33 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.32), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=126}
%slice.29 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p2.12), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%reshape.30 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.29), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=127}
%reshape.34 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.30), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=126}
%p3.13 = f32[3,2,2]{0,2,1} parameter(3), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=117}
%slice.21 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p3.13), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=119}
%reshape.22 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.21), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=119}
%slice.18 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p3.13), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=117}
%reshape.19 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.18), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=117}
%transpose.20 = f32[2,2]{0,1} transpose(f32[2,2]{1,0} %reshape.19), dimensions={1,0}, metadata={op_type="aten__permute" op_name="aten__permute" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%dot.23 = f32[2,2]{1,0} dot(f32[2,2]{1,0} %reshape.22, f32[2,2]{0,1} %transpose.20), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%slice.14 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p3.13), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=118}
%reshape.15 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.14), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=118}
%slice.16 = f32[1,2]{1,0} slice(f32[2,2]{1,0} %reshape.15), slice={[0:1], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=118}
%reshape.17 = f32[2]{0} reshape(f32[1,2]{1,0} %slice.16), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=118}
%reshape.24 = f32[1,2]{1,0} reshape(f32[2]{0} %reshape.17), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.25 = f32[1,2]{1,0} broadcast(f32[1,2]{1,0} %reshape.24), dimensions={0,1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.26 = f32[2]{0} reshape(f32[1,2]{1,0} %broadcast.25), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.27 = f32[2,2]{1,0} broadcast(f32[2]{0} %reshape.26), dimensions={1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%add.28 = f32[2,2]{1,0} add(f32[2,2]{1,0} %dot.23, f32[2,2]{1,0} %broadcast.27), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.35 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %add.28), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=126}
%concatenate.36 = f32[3,2,2]{2,1,0} concatenate(f32[1,2,2]{2,1,0} %reshape.33, f32[1,2,2]{2,1,0} %reshape.34, f32[1,2,2]{2,1,0} %reshape.35), dimensions={0}, metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=126}
ROOT %tuple.37 = (s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{2,1,0}) tuple(s32[] %subtract.10, f32[3,2,2]{0,2,1} %p1.11, f32[3,2,2]{0,2,1} %p2.12, f32[3,2,2]{2,1,0} %concatenate.36)
}
%PyLoweringContext.33.38 (in.39: (s32[], f32[3,2,2], f32[3,2,2], f32[3,2,2])) -> (s32[], f32[3,2,2], f32[3,2,2], f32[3,2,2]) {
%in.39 = (s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) parameter(0)
%get-tuple-element.40 = s32[] get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.39), index=0
%get-tuple-element.41 = f32[3,2,2]{0,2,1} get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.39), index=1
%get-tuple-element.42 = f32[3,2,2]{0,2,1} get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.39), index=2
%get-tuple-element.43 = f32[3,2,2]{0,2,1} get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.39), index=3
ROOT %call.44 = (s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{2,1,0}) call(s32[] %get-tuple-element.40, f32[3,2,2]{0,2,1} %get-tuple-element.41, f32[3,2,2]{0,2,1} %get-tuple-element.42, f32[3,2,2]{0,2,1} %get-tuple-element.43), to_apply=%PyLoweringContext.5
}
%PyLoweringContext.45 (p0.47: s32[], UnusedArgumentsPlaceholder.50: f32[3,2,2], UnusedArgumentsPlaceholder.51: f32[3,2,2], UnusedArgumentsPlaceholder.52: f32[3,2,2]) -> pred[] {
%p0.47 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%convert.48 = s64[] convert(s32[] %p0.47), metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=114}
%constant.46 = s64[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=114}
ROOT %compare.49 = pred[] compare(s64[] %convert.48, s64[] %constant.46), direction=GE, metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=114}
%UnusedArgumentsPlaceholder.50 = f32[3,2,2]{0,2,1} parameter(1)
%UnusedArgumentsPlaceholder.51 = f32[3,2,2]{0,2,1} parameter(2)
%UnusedArgumentsPlaceholder.52 = f32[3,2,2]{0,2,1} parameter(3)
}
%PyLoweringContext.8.53 (in.54: (s32[], f32[3,2,2], f32[3,2,2], f32[3,2,2])) -> pred[] {
%in.54 = (s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) parameter(0)
%get-tuple-element.55 = s32[] get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.54), index=0
%get-tuple-element.56 = f32[3,2,2]{0,2,1} get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.54), index=1
%get-tuple-element.57 = f32[3,2,2]{0,2,1} get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.54), index=2
%get-tuple-element.58 = f32[3,2,2]{0,2,1} get-tuple-element((s32[], f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}) %in.54), index=3
ROOT %call.59 = pred[] call(s32[] %get-tuple-element.55, f32[3,2,2]{0,2,1} %get-tuple-element.56, f32[3,2,2]{0,2,1} %get-tuple-element.57, f32[3,2,2]{0,2,1} %get-tuple-element.58), to_apply=%PyLoweringContext.45
}
%while_loop.60 (p0.61: s32[], p1.62: f32[3,2,2], p2.63: f32[3,2,2], p3.64: f32[3,2,2]) -> (s32[], f32[3,2,2], f32[3,2,2], f32[3,2,2]) {
%p0.61 = s32[] parameter(0)
%p1.62 = f32[3,2,2]{2,1,0} parameter(1)
%p2.63 = f32[3,2,2]{2,1,0} parameter(2)
%p3.64 = f32[3,2,2]{2,1,0} parameter(3)
%tuple.65 = (s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) tuple(s32[] %p0.61, f32[3,2,2]{2,1,0} %p1.62, f32[3,2,2]{2,1,0} %p2.63, f32[3,2,2]{2,1,0} %p3.64)
ROOT %while.66 = (s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) while((s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) %tuple.65), condition=%PyLoweringContext.8.53, body=%PyLoweringContext.33.38
}
ENTRY %IrToHlo.73 (p0.1: f32[3,2,2], p1.2: f32[3,2,2], p2.3: f32[3,2,2], p3.4: s32[]) -> (f32[3,2,2]) {
%p3.4 = s32[] parameter(3), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%p2.3 = f32[3,2,2]{0,2,1} parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=105}
%p1.2 = f32[3,2,2]{0,2,1} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=109}
%p0.1 = f32[3,2,2]{0,2,1} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=117}
%call.67 = (s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) call(s32[] %p3.4, f32[3,2,2]{0,2,1} %p2.3, f32[3,2,2]{0,2,1} %p1.2, f32[3,2,2]{0,2,1} %p0.1), to_apply=%while_loop.60, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=155}
%get-tuple-element.68 = s32[] get-tuple-element((s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) %call.67), index=0, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=155}
%get-tuple-element.69 = f32[3,2,2]{2,1,0} get-tuple-element((s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) %call.67), index=1, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=155}
%get-tuple-element.70 = f32[3,2,2]{2,1,0} get-tuple-element((s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) %call.67), index=2, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=155}
%get-tuple-element.71 = f32[3,2,2]{2,1,0} get-tuple-element((s32[], f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}, f32[3,2,2]{2,1,0}) %call.67), index=3, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=155}
ROOT %tuple.72 = (f32[3,2,2]{2,1,0}) tuple(f32[3,2,2]{2,1,0} %get-tuple-element.71)
}
Graph Hash: 26d0080b5daa209183a22cc85555d55
## END_GRAPH
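IrToHlo.73 is the dump this gist is about. The body computation %PyLoweringContext.5 and the condition %PyLoweringContext.45 are wrapped and handed to a native XLA while (%while.66) whose carried tuple is (s32[], f32[3,2,2], f32[3,2,2], f32[3,2,2]). The last f32[3,2,2] operand packs weight, bias, and the running value as its three [2,2] slices: the body slices out the weight (slice [0:1], transposed for the dot), the bias (slice [1:2]), and the current value (slice [2:3]), applies the linear layer as dot plus broadcast add, then re-stacks the result. Because weight and bias travel in the loop carry as parameters rather than being captured as constants, they remain changeable across iterations.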
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:142)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (a461c1dac130cf1f4525bdf9d4e39d10)
## BEGIN_GRAPH
HloModule IrToHlo.6, entry_computation_layout={(s32[])->(pred[])}
ENTRY %IrToHlo.6 (p0.2: s32[]) -> (pred[]) {
%p0.2 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%convert.3 = s64[] convert(s32[] %p0.2), metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%constant.1 = s64[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%compare.4 = pred[] compare(s64[] %convert.3, s64[] %constant.1), direction=GE, metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
ROOT %tuple.5 = (pred[]) tuple(pred[] %compare.4)
}
Graph Hash: d408f8ab03a4e606a0f2338719f4e9b2
## END_GRAPH
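The remaining dumps alternate between host-side evaluations of the loop condition (the aten__ge at test line 142) and re-traces of the counter updates at lines 151-152. Each pass through the host loop leaves one more pending aten__sub chain in the lazy graph, which is why the module grows from IrToHlo.12 to IrToHlo.16 to IrToHlo.20 while identical traces keep the same Graph Hash.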
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:152)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (680b8b38cacabc0ba280595d3650ad01)
## BEGIN_GRAPH
HloModule IrToHlo.12, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.12 (p0.5: s32[]) -> (s32[]) {
%p0.5 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.6 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.7 = s32[] subtract(s32[] %p0.5, s32[] %multiply.6), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%negate.8 = s32[] negate(s32[] %subtract.7), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%multiply.9 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.5/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%subtract.10 = s32[] subtract(s32[] %negate.8, s32[] %multiply.9), metadata={op_type="aten__sub" op_name="aten__sub.5/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
ROOT %tuple.11 = (s32[]) tuple(s32[] %subtract.10)
}
Graph Hash: 4bc622aece4fc977dddcce298b2c7ab9
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:152)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (680b8b38cacabc0ba280595d3650ad01)
## BEGIN_GRAPH
HloModule IrToHlo.12, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.12 (p0.5: s32[]) -> (s32[]) {
%p0.5 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.6 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.7 = s32[] subtract(s32[] %p0.5, s32[] %multiply.6), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%negate.8 = s32[] negate(s32[] %subtract.7), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%multiply.9 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.6/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%subtract.10 = s32[] subtract(s32[] %negate.8, s32[] %multiply.9), metadata={op_type="aten__sub" op_name="aten__sub.6/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
ROOT %tuple.11 = (s32[]) tuple(s32[] %subtract.10)
}
Graph Hash: 4bc622aece4fc977dddcce298b2c7ab9
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:142)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (9cff8d03df2b63368afb1250aaafda09)
## BEGIN_GRAPH
HloModule IrToHlo.10, entry_computation_layout={(s32[])->(pred[])}
ENTRY %IrToHlo.10 (p0.4: s32[]) -> (pred[]) {
%p0.4 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.5 = s32[] multiply(s32[] %constant.3, s32[] %constant.2), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.6 = s32[] subtract(s32[] %p0.4, s32[] %multiply.5), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%convert.7 = s64[] convert(s32[] %subtract.6), metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%constant.1 = s64[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%compare.8 = pred[] compare(s64[] %convert.7, s64[] %constant.1), direction=GE, metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
ROOT %tuple.9 = (pred[]) tuple(pred[] %compare.8)
}
Graph Hash: c72f1970817fd2561923d22478873039
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:152)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (484fcb0a0920f98c6ada96f4c8d51c43)
## BEGIN_GRAPH
HloModule IrToHlo.16, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.16 (p0.7: s32[]) -> (s32[]) {
%p0.7 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.6 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.5 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.8 = s32[] multiply(s32[] %constant.6, s32[] %constant.5), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.9 = s32[] subtract(s32[] %p0.7, s32[] %multiply.8), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.10 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.11 = s32[] subtract(s32[] %subtract.9, s32[] %multiply.10), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%negate.12 = s32[] negate(s32[] %subtract.11), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%multiply.13 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.8/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%subtract.14 = s32[] subtract(s32[] %negate.12, s32[] %multiply.13), metadata={op_type="aten__sub" op_name="aten__sub.8/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
ROOT %tuple.15 = (s32[]) tuple(s32[] %subtract.14)
}
Graph Hash: 1b5c67a400463b8d72838e85951a9238
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:152)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (484fcb0a0920f98c6ada96f4c8d51c43)
## BEGIN_GRAPH
HloModule IrToHlo.16, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.16 (p0.7: s32[]) -> (s32[]) {
%p0.7 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.6 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.5 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.8 = s32[] multiply(s32[] %constant.6, s32[] %constant.5), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.9 = s32[] subtract(s32[] %p0.7, s32[] %multiply.8), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.10 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.11 = s32[] subtract(s32[] %subtract.9, s32[] %multiply.10), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%negate.12 = s32[] negate(s32[] %subtract.11), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%multiply.13 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.9/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%subtract.14 = s32[] subtract(s32[] %negate.12, s32[] %multiply.13), metadata={op_type="aten__sub" op_name="aten__sub.9/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
ROOT %tuple.15 = (s32[]) tuple(s32[] %subtract.14)
}
Graph Hash: 1b5c67a400463b8d72838e85951a9238
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:142)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (777f23957947f4ca5cb0dfbab89642b4)
## BEGIN_GRAPH
HloModule IrToHlo.14, entry_computation_layout={(s32[])->(pred[])}
ENTRY %IrToHlo.14 (p0.6: s32[]) -> (pred[]) {
%p0.6 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.5 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.7 = s32[] multiply(s32[] %constant.5, s32[] %constant.4), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.8 = s32[] subtract(s32[] %p0.6, s32[] %multiply.7), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.9 = s32[] multiply(s32[] %constant.3, s32[] %constant.2), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.10 = s32[] subtract(s32[] %subtract.8, s32[] %multiply.9), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%convert.11 = s64[] convert(s32[] %subtract.10), metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%constant.1 = s64[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%compare.12 = pred[] compare(s64[] %convert.11, s64[] %constant.1), direction=GE, metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
ROOT %tuple.13 = (pred[]) tuple(pred[] %compare.12)
}
Graph Hash: 7a4100fad79b5ffc4e9e36a97436956b
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:152)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (9bb44d6176f336ac401e4eea1c9afd30)
## BEGIN_GRAPH
HloModule IrToHlo.20, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.20 (p0.9: s32[]) -> (s32[]) {
%p0.9 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.8 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.7 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.10 = s32[] multiply(s32[] %constant.8, s32[] %constant.7), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.11 = s32[] subtract(s32[] %p0.9, s32[] %multiply.10), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.6 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.5 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.12 = s32[] multiply(s32[] %constant.6, s32[] %constant.5), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.13 = s32[] subtract(s32[] %subtract.11, s32[] %multiply.12), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.14 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.15 = s32[] subtract(s32[] %subtract.13, s32[] %multiply.14), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%negate.16 = s32[] negate(s32[] %subtract.15), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%multiply.17 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.11/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%subtract.18 = s32[] subtract(s32[] %negate.16, s32[] %multiply.17), metadata={op_type="aten__sub" op_name="aten__sub.11/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
ROOT %tuple.19 = (s32[]) tuple(s32[] %subtract.18)
}
Graph Hash: f9892f200915a854b9afd0fd64775431
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:152)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (9bb44d6176f336ac401e4eea1c9afd30)
## BEGIN_GRAPH
HloModule IrToHlo.20, entry_computation_layout={(s32[])->(s32[])}
ENTRY %IrToHlo.20 (p0.9: s32[]) -> (s32[]) {
%p0.9 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.8 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.7 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.10 = s32[] multiply(s32[] %constant.8, s32[] %constant.7), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.11 = s32[] subtract(s32[] %p0.9, s32[] %multiply.10), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.6 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.5 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.12 = s32[] multiply(s32[] %constant.6, s32[] %constant.5), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.13 = s32[] subtract(s32[] %subtract.11, s32[] %multiply.12), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.14 = s32[] multiply(s32[] %constant.4, s32[] %constant.3), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.15 = s32[] subtract(s32[] %subtract.13, s32[] %multiply.14), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%negate.16 = s32[] negate(s32[] %subtract.15), metadata={op_type="aten__neg" op_name="aten__neg" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%multiply.17 = s32[] multiply(s32[] %constant.2, s32[] %constant.1), metadata={op_type="aten__sub" op_name="aten__sub.12/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%subtract.18 = s32[] subtract(s32[] %negate.16, s32[] %multiply.17), metadata={op_type="aten__sub" op_name="aten__sub.12/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
ROOT %tuple.19 = (s32[]) tuple(s32[] %subtract.18)
}
Graph Hash: f9892f200915a854b9afd0fd64775431
## END_GRAPH
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:142)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (1fbb903dd1da15d91e800f805a5bb875)
## BEGIN_GRAPH
HloModule IrToHlo.18, entry_computation_layout={(s32[])->(pred[])}
ENTRY %IrToHlo.18 (p0.8: s32[]) -> (pred[]) {
%p0.8 = s32[] parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=135}
%constant.7 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.6 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.9 = s32[] multiply(s32[] %constant.7, s32[] %constant.6), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.10 = s32[] subtract(s32[] %p0.8, s32[] %multiply.9), metadata={op_type="aten__sub" op_name="aten__sub.4/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.5 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.4 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.11 = s32[] multiply(s32[] %constant.5, s32[] %constant.4), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.12 = s32[] subtract(s32[] %subtract.10, s32[] %multiply.11), metadata={op_type="aten__sub" op_name="aten__sub.7/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.3 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%constant.2 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%multiply.13 = s32[] multiply(s32[] %constant.3, s32[] %constant.2), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%subtract.14 = s32[] subtract(s32[] %subtract.12, s32[] %multiply.13), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/pytorch/xla/test/test_while_loop.py" source_line=151}
%convert.15 = s64[] convert(s32[] %subtract.14), metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%constant.1 = s64[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
%compare.16 = pred[] compare(s64[] %convert.15, s64[] %constant.1), direction=GE, metadata={op_type="aten__ge" op_name="aten__ge" source_file="/pytorch/xla/test/test_while_loop.py" source_line=142}
ROOT %tuple.17 = (pred[]) tuple(pred[] %compare.16)
}
Graph Hash: 13997fb789c6e955842e95b9b2a7dab0
## END_GRAPH
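
NOTE: IrToHlo.18 is the loop-condition graph: it replays the same counter decrements and then lowers the aten__ge at test line 142 into a convert to s64 plus a compare with direction=GE against constant 0, returning a single pred[]. A minimal sketch of a condition function that lowers to this shape; the name cond_fn and the extra carried arguments are assumptions for illustration, not the author's exact code:

    def cond_fn(iteri, inputs, weights, biases):
        # lowers to convert(s32 -> s64) + compare(..., direction=GE) vs. constant 0
        return iteri >= 0
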
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
_str_intern (/pytorch/torch/_tensor_str.py:432)
_str (/pytorch/torch/_tensor_str.py:697)
__repr__ (/pytorch/torch/_tensor.py:462)
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:153)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (892fbeb5f35089112a5b6b9755280a51)
## BEGIN_GRAPH
HloModule IrToHlo.74, entry_computation_layout={(f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1})->(f32[3,2,2]{2,1,0})}
ENTRY %IrToHlo.74 (p0.1: f32[3,2,2], p1.17: f32[3,2,2], p2.20: f32[3,2,2]) -> (f32[3,2,2]) {
%p2.20 = f32[3,2,2]{0,2,1} parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=105}
%slice.67 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p2.20), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.68 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.67), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.69 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.68), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%p1.17 = f32[3,2,2]{0,2,1} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=109}
%slice.65 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p1.17), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.66 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.65), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.70 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.66), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%slice.44 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p2.20), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.45 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.44), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.46 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.45), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%slice.42 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p1.17), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.43 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.42), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.47 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.43), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%slice.21 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p2.20), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.22 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.21), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.23 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.22), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%slice.18 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p1.17), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.19 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.18), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%reshape.24 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %reshape.19), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%p0.1 = f32[3,2,2]{0,2,1} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=117}
%slice.9 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p0.1), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=145}
%reshape.10 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.9), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=145}
%slice.6 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p0.1), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=143}
%reshape.7 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.6), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=143}
%transpose.8 = f32[2,2]{0,1} transpose(f32[2,2]{1,0} %reshape.7), dimensions={1,0}, metadata={op_type="aten__permute" op_name="aten__permute" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%dot.11 = f32[2,2]{1,0} dot(f32[2,2]{1,0} %reshape.10, f32[2,2]{0,1} %transpose.8), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%slice.2 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p0.1), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.3 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.2), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%slice.4 = f32[1,2]{1,0} slice(f32[2,2]{1,0} %reshape.3), slice={[0:1], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.5 = f32[2]{0} reshape(f32[1,2]{1,0} %slice.4), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.12 = f32[1,2]{1,0} reshape(f32[2]{0} %reshape.5), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.13 = f32[1,2]{1,0} broadcast(f32[1,2]{1,0} %reshape.12), dimensions={0,1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.14 = f32[2]{0} reshape(f32[1,2]{1,0} %broadcast.13), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.15 = f32[2,2]{1,0} broadcast(f32[2]{0} %reshape.14), dimensions={1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%add.16 = f32[2,2]{1,0} add(f32[2,2]{1,0} %dot.11, f32[2,2]{1,0} %broadcast.15), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.25 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %add.16), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%concatenate.26 = f32[3,2,2]{2,1,0} concatenate(f32[1,2,2]{2,1,0} %reshape.23, f32[1,2,2]{2,1,0} %reshape.24, f32[1,2,2]{2,1,0} %reshape.25), dimensions={0}, metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%slice.34 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{2,1,0} %concatenate.26), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=145}
%reshape.35 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.34), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=145}
%slice.31 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{2,1,0} %concatenate.26), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=143}
%reshape.32 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.31), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=143}
%transpose.33 = f32[2,2]{0,1} transpose(f32[2,2]{1,0} %reshape.32), dimensions={1,0}, metadata={op_type="aten__permute" op_name="aten__permute" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%dot.36 = f32[2,2]{1,0} dot(f32[2,2]{1,0} %reshape.35, f32[2,2]{0,1} %transpose.33), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%slice.27 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{2,1,0} %concatenate.26), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.28 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.27), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%slice.29 = f32[1,2]{1,0} slice(f32[2,2]{1,0} %reshape.28), slice={[0:1], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.30 = f32[2]{0} reshape(f32[1,2]{1,0} %slice.29), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.37 = f32[1,2]{1,0} reshape(f32[2]{0} %reshape.30), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.38 = f32[1,2]{1,0} broadcast(f32[1,2]{1,0} %reshape.37), dimensions={0,1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.39 = f32[2]{0} reshape(f32[1,2]{1,0} %broadcast.38), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.40 = f32[2,2]{1,0} broadcast(f32[2]{0} %reshape.39), dimensions={1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%add.41 = f32[2,2]{1,0} add(f32[2,2]{1,0} %dot.36, f32[2,2]{1,0} %broadcast.40), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.48 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %add.41), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%concatenate.49 = f32[3,2,2]{2,1,0} concatenate(f32[1,2,2]{2,1,0} %reshape.46, f32[1,2,2]{2,1,0} %reshape.47, f32[1,2,2]{2,1,0} %reshape.48), dimensions={0}, metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%slice.57 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{2,1,0} %concatenate.49), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=145}
%reshape.58 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.57), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=145}
%slice.54 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{2,1,0} %concatenate.49), slice={[0:1], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=143}
%reshape.55 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.54), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=143}
%transpose.56 = f32[2,2]{0,1} transpose(f32[2,2]{1,0} %reshape.55), dimensions={1,0}, metadata={op_type="aten__permute" op_name="aten__permute" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%dot.59 = f32[2,2]{1,0} dot(f32[2,2]{1,0} %reshape.58, f32[2,2]{0,1} %transpose.56), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%slice.50 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{2,1,0} %concatenate.49), slice={[1:2], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.51 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.50), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%slice.52 = f32[1,2]{1,0} slice(f32[2,2]{1,0} %reshape.51), slice={[0:1], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.53 = f32[2]{0} reshape(f32[1,2]{1,0} %slice.52), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=144}
%reshape.60 = f32[1,2]{1,0} reshape(f32[2]{0} %reshape.53), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.61 = f32[1,2]{1,0} broadcast(f32[1,2]{1,0} %reshape.60), dimensions={0,1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.62 = f32[2]{0} reshape(f32[1,2]{1,0} %broadcast.61), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%broadcast.63 = f32[2,2]{1,0} broadcast(f32[2]{0} %reshape.62), dimensions={1}, metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%add.64 = f32[2,2]{1,0} add(f32[2,2]{1,0} %dot.59, f32[2,2]{1,0} %broadcast.63), metadata={op_type="aten__addmm" op_name="aten__addmm" source_file="/pytorch/torch/nn/modules/linear.py" source_line=116}
%reshape.71 = f32[1,2,2]{2,1,0} reshape(f32[2,2]{1,0} %add.64), metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
%concatenate.72 = f32[3,2,2]{2,1,0} concatenate(f32[1,2,2]{2,1,0} %reshape.69, f32[1,2,2]{2,1,0} %reshape.70, f32[1,2,2]{2,1,0} %reshape.71), dimensions={0}, metadata={op_type="aten__stack" op_name="aten__stack" source_file="/pytorch/xla/test/test_while_loop.py" source_line=152}
ROOT %tuple.73 = (f32[3,2,2]{2,1,0}) tuple(f32[3,2,2]{2,1,0} %concatenate.72)
}
Graph Hash: 99010c1a2fedf1dd50f11ce09100ae3c
## END_GRAPH
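
NOTE: IrToHlo.74 is the computation behind the printed result tensor, unrolled three times. Each round slices one 2x2 plane out of the running [3,2,2] carry: the weight at index 0 (test line 143), the first row of the bias plane at index 1 (line 144), and the current value at index 2 (line 145); feeds them through nn.Linear (the transpose/dot/broadcast/add cluster from linear.py line 116, i.e. x @ w.T + b); then restacks a new carry whose weight and bias planes are swapped in from the p2/p1 parameter stacks (source lines 105 and 109) and whose last plane is the fresh output (line 152). A hedged sketch of a body function matching this pattern; the [weight, bias, value] packing is visible in the HLO, but the names and the exact plane-selection index are assumptions:

    import torch

    def body_fn(iteri, carry, weights, biases):
        w = carry[0]        # weight plane, f32[2,2]
        b = carry[1][0]     # first row of the bias plane, f32[2]
        y = torch.nn.functional.linear(carry[2], w, b)  # y = carry[2] @ w.T + b
        # swap in the next weight/bias planes and carry the new output forward
        new_carry = torch.stack((weights[iteri], biases[iteri], y))
        return iteri - 1, new_carry, weights, biases
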
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
assertTrue (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:685)
test_while_loop_simple_linear_outside_loop_change_weight_bias_submit_clean (/pytorch/xla/test/test_while_loop.py:155)
_callTestMethod (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:549)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:591)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/case.py:650)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:122)
__call__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/suite.py:84)
run (/root/miniconda3/envs/torch310/lib/python3.10/unittest/runner.py:184)
runTests (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:271)
__init__ (/root/miniconda3/envs/torch310/lib/python3.10/unittest/main.py:101)
<module> (/pytorch/xla/test/test_while_loop.py:479)
Root Hashes: (cfc319822fc61cb394a8b7ccc3c69ad)
## BEGIN_GRAPH
HloModule IrToHlo.23, entry_computation_layout={(f32[3,2,2]{0,2,1}, f32[3,2,2]{0,2,1})->(pred[])}
%AllComputation.10 (x.11: pred[], y.12: pred[]) -> pred[] {
%x.11 = pred[] parameter(0)
%constant.13 = pred[] constant(false)
%compare.16 = pred[] compare(pred[] %x.11, pred[] %constant.13), direction=NE
%y.12 = pred[] parameter(1)
%compare.15 = pred[] compare(pred[] %y.12, pred[] %constant.13), direction=NE
%and.17 = pred[] and(pred[] %compare.16, pred[] %compare.15)
%constant.14 = pred[] constant(true)
ROOT %select.18 = pred[] select(pred[] %and.17, pred[] %constant.14, pred[] %constant.13)
}
ENTRY %IrToHlo.23 (p0.1: f32[3,2,2], p1.4: f32[3,2,2]) -> (pred[]) {
%constant.8 = s32[] constant(4), metadata={op_type="aten__all" op_name="aten__all" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%p1.4 = f32[3,2,2]{0,2,1} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%slice.5 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p1.4), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%reshape.6 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.5), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%p0.1 = f32[3,2,2]{0,2,1} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%slice.2 = f32[1,2,2]{2,1,0} slice(f32[3,2,2]{0,2,1} %p0.1), slice={[2:3], [0:2], [0:2]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%reshape.3 = f32[2,2]{1,0} reshape(f32[1,2,2]{2,1,0} %slice.2), metadata={op_type="aten__view" op_name="aten__view" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%compare.7 = pred[2,2]{1,0} compare(f32[2,2]{1,0} %reshape.6, f32[2,2]{1,0} %reshape.3), direction=EQ, metadata={op_type="aten__eq" op_name="aten__eq" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%constant.9 = pred[] constant(true), metadata={op_type="aten__all" op_name="aten__all" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%reduce.19 = pred[] reduce(pred[2,2]{1,0} %compare.7, pred[] %constant.9), dimensions={0,1}, to_apply=%AllComputation.10, metadata={op_type="aten__all" op_name="aten__all" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%constant.20 = pred[] constant(false), metadata={op_type="aten__all" op_name="aten__all" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
%compare.21 = pred[] compare(pred[] %reduce.19, pred[] %constant.20), direction=NE, metadata={op_type="aten__all" op_name="aten__all" source_file="/pytorch/xla/test/test_while_loop.py" source_line=155}
ROOT %tuple.22 = (pred[]) tuple(pred[] %compare.21)
}
Graph Hash: b60e3643a9de50498a9709681c654048
## END_GRAPH
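
NOTE: the final graph, IrToHlo.23, is the test's assertion at line 155. It slices the last [2,2] plane out of both the computed and the expected [3,2,2] stacks, compares them elementwise (aten__eq), and reduces the pred[2,2] result with %AllComputation, the lowering of aten__all. The whole module is therefore roughly the materialization of (tensor names assumed for illustration):

    torch.all(torch.eq(res[2], expected[2]))

which is forced into a concrete bool by assertTrue, hence the assertTrue frame at the top of the trigger stack.
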
(torch310) root@b7b12c30e894:/pytorch/xla#