one hlo
Created May 8, 2024 18:59
(base) root@t1v-n-0b41c02e-w-0:/# conda activate torch310
(torch310) root@t1v-n-0b41c02e-w-0:/# cd ~/pytorch/xla/
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# git branch
  fori_loop_simple_case_test
  fori_loop_simple_case_test_post_order
  fori_loop_simple_case_test_test
  fori_loop_simple_case_test_test_mnist_clean
* fori_loop_simple_case_testmaster
  master
  newapr23
  newtmpmay1
  newtmpmay7
  newtmpmay8
  openxlapinupdateapr25
  openxlapinupdateapr25again
  tmpmay6
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# rm -rf /tmp
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# XLA_IR_DEBUG=1 XLA_HLO_DEBUG=1 XLA_SAVE_TENSORS_FMT="hlo" XLA_SAVE_TENSORS_FILE="/tmp/save1.hlo" XLA_FLAGS=--xla_dump_to=/src/repo/hlogeneratedwhengenerateir PJRT_DEVICE=TPU python test/test_test_fori_loop.py
Could not access /tmp/tpu_logs: not found
Could not open any log file.
last message repeated 85 times
------------simple linear one------------(original)
Could not open any log file.
last message repeated 30 times
torch_add_res__: FunctionalTensor(lvl=0, value=\
tensor([53], device='xla:0', dtype=torch.int32))
res: FunctionalTensor(lvl=0, value=\
tensor([ 8.6785, -12.2220, -18.3325, -11.5665, 0.7858, -28.8699, -6.9433,
-20.0388, 16.4420, -12.3062, 14.6674, 5.0801, -21.1780, -34.9048,
12.3117, -11.2889, 1.6557, -2.8843, -7.5593, 27.8263],
device='xla:0'))
expected: tensor([ 8.6785, -12.2220, -18.3325, -11.5665, 0.7858, -28.8699, -6.9433,
-20.0388, 16.4420, -12.3062, 14.6674, 5.0801, -21.1780, -34.9048,
12.3117, -11.2889, 1.6557, -2.8843, -7.5593, 27.8263],
device='xla:0')
Could not open any log file.
last message repeated 154 times
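
Note: the log spam above is self-inflicted. The earlier rm -rf /tmp removed both /tmp/tpu_logs, where the TPU runtime writes its logs, and the directory that XLA_SAVE_TENSORS_FILE points into. A minimal sketch of the same debug setup from Python, recreating the directories first; the paths are the ones used above, and it assumes the variables are set before torch_xla is imported:

    import os

    # Recreate the directories the debug variables point at; /tmp/tpu_logs is
    # where the TPU runtime expects to write its logs, hence the warnings above.
    for d in ("/tmp", "/src/repo/hlogeneratedwhengenerateir"):
        os.makedirs(d, exist_ok=True)

    os.environ.update({
        "XLA_IR_DEBUG": "1",            # record Python frames on lazy-IR nodes
        "XLA_HLO_DEBUG": "1",           # carry those frames into HLO metadata={...}
        "XLA_SAVE_TENSORS_FMT": "hlo",  # save synced graphs as HLO text
        "XLA_SAVE_TENSORS_FILE": "/tmp/save1.hlo",  # written as save1.hlo.<ordinal>
        "XLA_FLAGS": "--xla_dump_to=/src/repo/hlogeneratedwhengenerateir",
        "PJRT_DEVICE": "TPU",
    })
    # Import torch_xla only after these are set, then run the test module.
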
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# ls /tmp
ls: cannot access '/tmp': No such file or directory
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# cd /tmp
bash: cd: /tmp: No such file or directory
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# mkdir /tmp
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# XLA_IR_DEBUG=1 XLA_HLO_DEBUG=1 XLA_SAVE_TENSORS_FMT="hlo" XLA_SAVE_TENSORS_FILE="/tmp/save1.hlo" XLA_FLAGS=--xla_dump_to=/src/repo/hlogeneratedwhengenerateir PJRT_DEVICE=TPU python test/test_test_fori_loop.py
------------simple linear one------------(original)
torch_add_res__: FunctionalTensor(lvl=0, value=\
tensor([53], device='xla:0', dtype=torch.int32))
res: FunctionalTensor(lvl=0, value=\
tensor([-13.2017, 5.3126, -2.0887, 42.4963, -2.8923, -5.4809, 4.4193,
27.6952, 4.9128, -11.5728, 12.1975, 26.9884, -7.1339, 14.2867,
-15.0189, 7.7009, -1.0184, 7.9056, 11.0350, 20.2705],
device='xla:0'))
expected: tensor([-13.2017, 5.3126, -2.0887, 42.4963, -2.8923, -5.4809, 4.4193,
27.6952, 4.9128, -11.5728, 12.1975, 26.9884, -7.1339, 14.2867,
-15.0189, 7.7009, -1.0184, 7.9056, 11.0350, 20.2705],
device='xla:0')
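
The FunctionalTensor(lvl=0, ...) wrappers in the printout come from functionalization being active while the loop is traced; the underlying values of res and expected agree elementwise. A hypothetical check equivalent to eyeballing the two printouts (assuming res can be moved to CPU directly; it may first need unwrapping from the functional wrapper):

    import torch

    # res / expected as printed above; the tolerance is an assumption.
    assert torch.allclose(res.cpu(), expected.cpu(), atol=1e-4)
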
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# cat /tmp/save1.hlo.0
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
  _fake_fori_loop (/root/pytorch/xla/test/test_test_fori_loop.py:12)
  <module> (/root/pytorch/xla/test/test_test_fori_loop.py:59)
Root Hashes: (cdf31edf0e61d230194e802e9ec7a48d)
## BEGIN_GRAPH
HloModule IrToHlo.10, entry_computation_layout={(s32[1]{0}, s32[1]{0})->(s32[])}
ENTRY %IrToHlo.10 (p0.2: s32[1], p1.3: s32[1]) -> (s32[]) {
  %p1.3 = s32[1]{0} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=43}
  %p0.2 = s32[1]{0} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=44}
  %constant.1 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=12}
  %broadcast.4 = s32[1]{0} broadcast(s32[] %constant.1), dimensions={}, metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=12}
  %multiply.5 = s32[1]{0} multiply(s32[1]{0} %p0.2, s32[1]{0} %broadcast.4), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=12}
  %subtract.6 = s32[1]{0} subtract(s32[1]{0} %p1.3, s32[1]{0} %multiply.5), metadata={op_type="aten__sub" op_name="aten__sub.10/aten__sub" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=12}
  %slice.7 = s32[1]{0} slice(s32[1]{0} %subtract.6), slice={[0:1]}, metadata={op_type="xla__generic_slice" op_name="xla__generic_slice" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=12}
  %reshape.8 = s32[] reshape(s32[1]{0} %slice.7), metadata={op_type="aten__view" op_name="aten__view" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=12}
  ROOT %tuple.9 = (s32[]) tuple(s32[] %reshape.8)
}
Graph Hash: 2d459e2f8cb1b1e9fdc7cf2bbd9c7efb
## END_GRAPH
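
Reading this first graph: IrToHlo.10 is the trace of _fake_fori_loop (test line 12). It takes two s32[1] device tensors, computes subtract(p1, p0 * 1), and slices/reshapes the result down to a scalar s32[]. A hypothetical Python reconstruction, with argument names and roles guessed from the HLO metadata rather than taken from the actual test source:

    import torch

    def _fake_fori_loop(lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
        # %broadcast.4/%multiply.5 and %subtract.6, then %slice.7/%reshape.8 to s32[]
        return (upper - lower * 1)[0]
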
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
  _str_intern (/root/pytorch/torch/_tensor_str.py:432)
  _str (/root/pytorch/torch/_tensor_str.py:697)
  __repr__ (/root/pytorch/torch/_tensor.py:462)
  _functorch_wrapper_str_intern (/root/pytorch/torch/_tensor_str.py:673)
  _str_intern (/root/pytorch/torch/_tensor_str.py:392)
  _str (/root/pytorch/torch/_tensor_str.py:697)
  __repr__ (/root/pytorch/torch/_tensor.py:462)
  <module> (/root/pytorch/xla/test/test_test_fori_loop.py:61)
Root Hashes: (f282fdfbc5c7637fc62cacde9bade651)
## BEGIN_GRAPH
HloModule IrToHlo.122, entry_computation_layout={(f32[20,10]{0,1}, f32[20]{0}, s64[], s32[1]{0}, s32[1]{0}, /*index=5*/s32[1]{0}, s32[1]{0})->(s32[1]{0})}
%PyLoweringContext.39 (p0.41: s32[1], p1.43: s32[1], p2.44: s32[1], p3.49: s32[1], p4.53: f32[10], p5.55: f32[20], p6.56: f32[20,10], UnusedArgumentsPlaceholder.40: f32[20]) -> (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20]) {
  %UnusedArgumentsPlaceholder.40 = f32[20]{0} parameter(7)
  %p0.41 = s32[1]{0} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76}
  %p2.44 = s32[1]{0} parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76}
  %p1.43 = s32[1]{0} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76}
  %constant.42 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="<eval_with_key>.1" source_line=5}
  %broadcast.45 = s32[1]{0} broadcast(s32[] %constant.42), dimensions={}, metadata={op_type="aten__add" op_name="aten__add.7/aten__add" source_file="<eval_with_key>.1" source_line=5}
  %multiply.46 = s32[1]{0} multiply(s32[1]{0} %p1.43, s32[1]{0} %broadcast.45), metadata={op_type="aten__add" op_name="aten__add.7/aten__add" source_file="<eval_with_key>.1" source_line=5}
  %add.47 = s32[1]{0} add(s32[1]{0} %p2.44, s32[1]{0} %multiply.46), metadata={op_type="aten__add" op_name="aten__add.7/aten__add" source_file="<eval_with_key>.1" source_line=5}
  %p3.49 = s32[1]{0} parameter(3), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76}
  %constant.48 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="<eval_with_key>.1" source_line=10}
  %broadcast.50 = s32[1]{0} broadcast(s32[] %constant.48), dimensions={}, metadata={op_type="aten__add" op_name="aten__add.9/aten__add" source_file="<eval_with_key>.1" source_line=10}
  %multiply.51 = s32[1]{0} multiply(s32[1]{0} %p3.49, s32[1]{0} %broadcast.50), metadata={op_type="aten__add" op_name="aten__add.9/aten__add" source_file="<eval_with_key>.1" source_line=10}
  %add.52 = s32[1]{0} add(s32[1]{0} %p2.44, s32[1]{0} %multiply.51), metadata={op_type="aten__add" op_name="aten__add.9/aten__add" source_file="<eval_with_key>.1" source_line=10}
  %p4.53 = f32[10]{0} parameter(4), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76}
  %reshape.58 = f32[1,10]{1,0} reshape(f32[10]{0} %p4.53), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6}
  %p6.56 = f32[20,10]{0,1} parameter(6), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=82}
  %transpose.57 = f32[10,20]{1,0} transpose(f32[20,10]{0,1} %p6.56), dimensions={1,0}, metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6}
  %dot.59 = f32[1,20]{1,0} dot(f32[1,10]{1,0} %reshape.58, f32[10,20]{1,0} %transpose.57), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="aten__mm" op_name="aten__mm" source_file="<eval_with_key>.1" source_line=6}
  %reshape.60 = f32[20]{0} reshape(f32[1,20]{1,0} %dot.59), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6}
  %p5.55 = f32[20]{0} parameter(5), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=82}
  %constant.54 = f32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="<eval_with_key>.1" source_line=6}
  %broadcast.61 = f32[20]{0} broadcast(f32[] %constant.54), dimensions={}, metadata={op_type="aten__add" op_name="aten__add.8/aten__add" source_file="<eval_with_key>.1" source_line=6}
  %multiply.62 = f32[20]{0} multiply(f32[20]{0} %p5.55, f32[20]{0} %broadcast.61), metadata={op_type="aten__add" op_name="aten__add.8/aten__add" source_file="<eval_with_key>.1" source_line=6}
  %add.63 = f32[20]{0} add(f32[20]{0} %reshape.60, f32[20]{0} %multiply.62), metadata={op_type="aten__add" op_name="aten__add.8/aten__add" source_file="<eval_with_key>.1" source_line=6}
  %reshape.64 = f32[1,20]{1,0} reshape(f32[20]{0} %add.63), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6}
  %reshape.65 = f32[20]{0} reshape(f32[1,20]{1,0} %reshape.64), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6}
  ROOT %tuple.66 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) tuple(s32[1]{0} %p0.41, s32[1]{0} %add.47, s32[1]{0} %p2.44, s32[1]{0} %add.52, f32[10]{0} %p4.53, /*index=5*/f32[20]{0} %reshape.65, f32[20,10]{0,1} %p6.56, f32[20]{0} %p5.55)
}
%PyLoweringContext.28.67 (in.68: (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20])) -> (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20]) {
  %in.68 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) parameter(0)
  %get-tuple-element.69 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=0
  %get-tuple-element.70 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=1
  %get-tuple-element.71 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=2
  %get-tuple-element.72 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=3
  %get-tuple-element.73 = f32[10]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=4
  %get-tuple-element.74 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=5
  %get-tuple-element.75 = f32[20,10]{0,1} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=6
  %get-tuple-element.76 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.68), index=7
  ROOT %call.77 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) call(s32[1]{0} %get-tuple-element.69, s32[1]{0} %get-tuple-element.70, s32[1]{0} %get-tuple-element.71, s32[1]{0} %get-tuple-element.72, f32[10]{0} %get-tuple-element.73, /*index=5*/f32[20]{0} %get-tuple-element.74, f32[20,10]{0,1} %get-tuple-element.75, f32[20]{0} %get-tuple-element.76), to_apply=%PyLoweringContext.39
}
%PyLoweringContext.78 (p0.85: s32[1], p1.87: s32[1], UnusedArgumentsPlaceholder.79: s32[1], UnusedArgumentsPlaceholder.80: s32[1], UnusedArgumentsPlaceholder.81: f32[10], UnusedArgumentsPlaceholder.82: f32[20], UnusedArgumentsPlaceholder.83: f32[20,10], UnusedArgumentsPlaceholder.84: f32[20]) -> pred[] {
  %UnusedArgumentsPlaceholder.79 = s32[1]{0} parameter(2)
  %UnusedArgumentsPlaceholder.80 = s32[1]{0} parameter(3)
  %UnusedArgumentsPlaceholder.81 = f32[10]{0} parameter(4)
  %UnusedArgumentsPlaceholder.82 = f32[20]{0} parameter(5)
  %UnusedArgumentsPlaceholder.83 = f32[20,10]{0,1} parameter(6)
  %UnusedArgumentsPlaceholder.84 = f32[20]{0} parameter(7)
  %p1.87 = s32[1]{0} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76}
  %reshape.88 = s32[] reshape(s32[1]{0} %p1.87), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.0" source_line=5}
  %p0.85 = s32[1]{0} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76}
  %reshape.86 = s32[] reshape(s32[1]{0} %p0.85), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.0" source_line=6}
  ROOT %compare.89 = pred[] compare(s32[] %reshape.88, s32[] %reshape.86), direction=LT, metadata={op_type="aten__lt" op_name="aten__lt" source_file="<eval_with_key>.0" source_line=7}
}
%PyLoweringContext.12.90 (in.91: (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20])) -> pred[] {
  %in.91 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) parameter(0)
  %get-tuple-element.92 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=0
  %get-tuple-element.93 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=1
  %get-tuple-element.94 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=2
  %get-tuple-element.95 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=3
  %get-tuple-element.96 = f32[10]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=4
  %get-tuple-element.97 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=5
  %get-tuple-element.98 = f32[20,10]{0,1} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=6
  %get-tuple-element.99 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.91), index=7
  ROOT %call.100 = pred[] call(s32[1]{0} %get-tuple-element.92, s32[1]{0} %get-tuple-element.93, s32[1]{0} %get-tuple-element.94, s32[1]{0} %get-tuple-element.95, f32[10]{0} %get-tuple-element.96, /*index=5*/f32[20]{0} %get-tuple-element.97, f32[20,10]{0,1} %get-tuple-element.98, f32[20]{0} %get-tuple-element.99), to_apply=%PyLoweringContext.78
}
%test_while.101 (p0.102: s32[1], p1.103: s32[1], p2.104: s32[1], p3.105: s32[1], p4.106: f32[10], p5.107: f32[20], p6.108: f32[20], p7.109: f32[20,10]) -> (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20]) {
  %p0.102 = s32[1]{0} parameter(0)
  %p1.103 = s32[1]{0} parameter(1)
  %p2.104 = s32[1]{0} parameter(2)
  %p3.105 = s32[1]{0} parameter(3)
  %p4.106 = f32[10]{0} parameter(4)
  %p6.108 = f32[20]{0} parameter(6)
  %p7.109 = f32[20,10]{1,0} parameter(7)
  %p5.107 = f32[20]{0} parameter(5)
  %tuple.110 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) tuple(s32[1]{0} %p0.102, s32[1]{0} %p1.103, s32[1]{0} %p2.104, s32[1]{0} %p3.105, f32[10]{0} %p4.106, /*index=5*/f32[20]{0} %p6.108, f32[20,10]{1,0} %p7.109, f32[20]{0} %p5.107)
  ROOT %while.111 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) while((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %tuple.110), condition=%PyLoweringContext.12.90, body=%PyLoweringContext.28.67
}
ENTRY %IrToHlo.122 (p0.1: f32[20,10], p1.2: f32[20], p2.8: s64[], p3.35: s32[1], p4.36: s32[1], p5.37: s32[1], p6.38: s32[1]) -> (s32[1]) {
  %constant.11 = s64[] constant(2531011), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %constant.9 = s64[] constant(214013), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %p2.8 = s64[] parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %multiply.10 = s64[] multiply(s64[] %constant.9, s64[] %p2.8), metadata={op_type="aten__mul" op_name="aten__mul" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %add.12 = s64[] add(s64[] %constant.11, s64[] %multiply.10), metadata={op_type="aten__add" op_name="aten__add" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %convert.15 = u64[] convert(s64[] %add.12), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %reshape.17 = u64[1]{0} reshape(u64[] %convert.15), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %constant.16 = u64[] constant(0), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %reshape.18 = u64[1]{0} reshape(u64[] %constant.16), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %concatenate.19 = u64[2]{0} concatenate(u64[1]{0} %reshape.17, u64[1]{0} %reshape.18), dimensions={0}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %rng-bit-generator.20 = (u64[2]{0}, u32[10]{0}) rng-bit-generator(u64[2]{0} %concatenate.19), algorithm=rng_default, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %get-tuple-element.22 = u64[2]{0} get-tuple-element((u64[2]{0}, u32[10]{0}) %rng-bit-generator.20), index=0, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %p6.38 = s32[1]{0} parameter(6), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=43}
  %p5.37 = s32[1]{0} parameter(5), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=44}
  %p4.36 = s32[1]{0} parameter(4), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=45}
  %p3.35 = s32[1]{0} parameter(3), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=46}
  %get-tuple-element.21 = u32[10]{0} get-tuple-element((u64[2]{0}, u32[10]{0}) %rng-bit-generator.20), index=1, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %constant.23 = u32[] constant(9), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %broadcast.24 = u32[10]{0} broadcast(u32[] %constant.23), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %shift-right-logical.25 = u32[10]{0} shift-right-logical(u32[10]{0} %get-tuple-element.21, u32[10]{0} %broadcast.24), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %convert.26 = f32[10]{0} convert(u32[10]{0} %shift-right-logical.25), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %constant.27 = f32[] constant(1.1920929e-07), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %broadcast.28 = f32[10]{0} broadcast(f32[] %constant.27), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %multiply.29 = f32[10]{0} multiply(f32[10]{0} %convert.26, f32[10]{0} %broadcast.28), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %constant.13 = f32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %constant.14 = f32[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %subtract.30 = f32[] subtract(f32[] %constant.13, f32[] %constant.14), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %broadcast.31 = f32[10]{0} broadcast(f32[] %subtract.30), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %multiply.32 = f32[10]{0} multiply(f32[10]{0} %multiply.29, f32[10]{0} %broadcast.31), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %broadcast.33 = f32[10]{0} broadcast(f32[] %constant.14), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %add.34 = f32[10]{0} add(f32[10]{0} %multiply.32, f32[10]{0} %broadcast.33), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47}
  %constant.3 = f32[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48}
  %reshape.4 = f32[1]{0} reshape(f32[] %constant.3), metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48}
  %broadcast.5 = f32[1]{0} broadcast(f32[1]{0} %reshape.4), dimensions={0}, metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48}
  %reshape.6 = f32[] reshape(f32[1]{0} %broadcast.5), metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48}
  %broadcast.7 = f32[20]{0} broadcast(f32[] %reshape.6), dimensions={}, metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48}
  %p1.2 = f32[20]{0} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/torch/nn/modules/module.py" source_line=1159}
  %p0.1 = f32[20,10]{0,1} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/torch/nn/modules/module.py" source_line=1159}
  %call.112 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) call(s32[1]{0} %p6.38, s32[1]{0} %p5.37, s32[1]{0} %p4.36, s32[1]{0} %p3.35, f32[10]{0} %add.34, /*index=5*/f32[20]{0} %broadcast.7, f32[20]{0} %p1.2, f32[20,10]{0,1} %p0.1), to_apply=%test_while.101, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.113 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=0, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.114 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=1, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.115 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=2, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.117 = f32[10]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=4, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.118 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=5, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.119 = f32[20,10]{1,0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=6, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.120 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=7, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  %get-tuple-element.116 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=3, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152}
  ROOT %tuple.121 = (s32[1]{0}) tuple(s32[1]{0} %get-tuple-element.116)
}
Graph Hash: a9f5bc548849e4e529f3b0ec296d61bc
## END_GRAPH
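
This second graph is the loop itself, traced when res is printed at test line 61: %test_while.101 packs eight values into a tuple (%tuple.110) and lowers to an xla::While (%while.111) with condition %PyLoweringContext.12.90 and body %PyLoweringContext.28.67. Read as plain Python, slightly simplified (the slot names are assumptions, and the HLO additionally rotates the old slot-5 value into slot 7):

    import torch

    def cond(upper, it, one, acc, x, out, weight, bias):
        return bool(it < upper)           # %compare.89, direction=LT

    def body(upper, it, one, acc, x, out, weight, bias):
        it = one + it * 1                 # %add.47
        acc = one + acc * 1               # %add.52
        out = x @ weight.T + out * 1      # %dot.59 + %add.63: the Linear(10, 20) step
        return upper, it, one, acc, x, out, weight, bias

    def while_111(state):                 # %while.111
        while cond(*state):
            state = body(*state)
        return state
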
[ScheduleSyncTensorsGraph]
TensorsGraphInfo:
  _str_intern (/root/pytorch/torch/_tensor_str.py:432)
  _str (/root/pytorch/torch/_tensor_str.py:697)
  __repr__ (/root/pytorch/torch/_tensor.py:462)
  _functorch_wrapper_str_intern (/root/pytorch/torch/_tensor_str.py:673)
  _str_intern (/root/pytorch/torch/_tensor_str.py:392)
  _str (/root/pytorch/torch/_tensor_str.py:697)
  __repr__ (/root/pytorch/torch/_tensor.py:462)
  <module> (/root/pytorch/xla/test/test_test_fori_loop.py:62)
Root Hashes: (248a503d47853a256a1aa783ac13988d)
## BEGIN_GRAPH
HloModule IrToHlo.122, entry_computation_layout={(f32[20,10]{0,1}, f32[20]{0}, s64[], s32[1]{0}, s32[1]{0}, /*index=5*/s32[1]{0}, s32[1]{0})->(f32[20]{0})}
[%PyLoweringContext.39 through ENTRY %IrToHlo.122 repeat the previous graph verbatim; the only difference is the entry layout above, which returns the loop's f32[20] linear output instead of the s32[1] counter. The transcript cuts off partway through the ENTRY computation here.]
%get-tuple-element.116 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=3, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.117 = f32[10]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=4, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.119 = f32[20,10]{1,0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=6, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.120 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=7, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.118 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.112), index=5, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
ROOT %tuple.121 = (f32[20]{0}) tuple(f32[20]{0} %get-tuple-element.118) | |
} | |
Graph Hash: f4f17d4ce372d561656d4159514906bd | |
## END_GRAPH | |
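
The `aten__uniform` sequence in the graph above (%rng-bit-generator.20 through %add.34) is the usual bit-twiddling lowering of a uniform sample: take 32 random bits per element, shift right by 9 so only 23 mantissa-width bits remain, multiply by 1.1920929e-07 (= 2**-23) to land in [0, 1), then apply the affine map (high - low) * u + low. Below is a minimal NumPy sketch of the same arithmetic; the randint call is only a stand-in for XLA's rng_default bit generator, not what the compiled program runs.

import numpy as np

bits = np.random.randint(0, 2**32, size=10, dtype=np.uint32)    # stand-in for %rng-bit-generator.20
mantissa = bits >> 9                                            # %shift-right-logical.25: keep 23 random bits
unit = mantissa.astype(np.float32) * np.float32(1.1920929e-07)  # %convert.26, %multiply.29: scale by 2**-23 into [0, 1)
low, high = np.float32(0.0), np.float32(1.0)                    # %constant.14 (0), %constant.13 (1)
sample = unit * (high - low) + low                              # %subtract.30, %multiply.32, %add.34
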
[ScheduleSyncTensorsGraph] | |
TensorsGraphInfo: | |
_str_intern (/root/pytorch/torch/_tensor_str.py:432) | |
_str (/root/pytorch/torch/_tensor_str.py:697) | |
__repr__ (/root/pytorch/torch/_tensor.py:462) | |
<module> (/root/pytorch/xla/test/test_test_fori_loop.py:63) | |
Root Hashes: (7477ce2004e71cd58d97df1103837cd) | |
## BEGIN_GRAPH | |
HloModule IrToHlo.130, entry_computation_layout={(f32[20,10]{0,1}, f32[20]{0}, s64[], s32[1]{0}, s32[1]{0}, /*index=5*/s32[1]{0}, s32[1]{0})->(f32[20]{0})} | |
%PyLoweringContext.40 (p0.42: s32[1], p1.44: s32[1], p2.45: s32[1], p3.50: s32[1], p4.54: f32[10], p5.56: f32[20], p6.57: f32[20,10], UnusedArgumentsPlaceholder.41: f32[20]) -> (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20]) { | |
%UnusedArgumentsPlaceholder.41 = f32[20]{0} parameter(7) | |
%p0.42 = s32[1]{0} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76} | |
%p2.45 = s32[1]{0} parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76} | |
%p1.44 = s32[1]{0} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76} | |
%constant.43 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="<eval_with_key>.1" source_line=5} | |
%broadcast.46 = s32[1]{0} broadcast(s32[] %constant.43), dimensions={}, metadata={op_type="aten__add" op_name="aten__add.7/aten__add" source_file="<eval_with_key>.1" source_line=5} | |
%multiply.47 = s32[1]{0} multiply(s32[1]{0} %p1.44, s32[1]{0} %broadcast.46), metadata={op_type="aten__add" op_name="aten__add.7/aten__add" source_file="<eval_with_key>.1" source_line=5} | |
%add.48 = s32[1]{0} add(s32[1]{0} %p2.45, s32[1]{0} %multiply.47), metadata={op_type="aten__add" op_name="aten__add.7/aten__add" source_file="<eval_with_key>.1" source_line=5} | |
%p3.50 = s32[1]{0} parameter(3), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76} | |
%constant.49 = s32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="<eval_with_key>.1" source_line=10} | |
%broadcast.51 = s32[1]{0} broadcast(s32[] %constant.49), dimensions={}, metadata={op_type="aten__add" op_name="aten__add.9/aten__add" source_file="<eval_with_key>.1" source_line=10} | |
%multiply.52 = s32[1]{0} multiply(s32[1]{0} %p3.50, s32[1]{0} %broadcast.51), metadata={op_type="aten__add" op_name="aten__add.9/aten__add" source_file="<eval_with_key>.1" source_line=10} | |
%add.53 = s32[1]{0} add(s32[1]{0} %p2.45, s32[1]{0} %multiply.52), metadata={op_type="aten__add" op_name="aten__add.9/aten__add" source_file="<eval_with_key>.1" source_line=10} | |
%p4.54 = f32[10]{0} parameter(4), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76} | |
%reshape.59 = f32[1,10]{1,0} reshape(f32[10]{0} %p4.54), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6} | |
%p6.57 = f32[20,10]{0,1} parameter(6), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=82} | |
%transpose.58 = f32[10,20]{1,0} transpose(f32[20,10]{0,1} %p6.57), dimensions={1,0}, metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6} | |
%dot.60 = f32[1,20]{1,0} dot(f32[1,10]{1,0} %reshape.59, f32[10,20]{1,0} %transpose.58), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="aten__mm" op_name="aten__mm" source_file="<eval_with_key>.1" source_line=6} | |
%reshape.61 = f32[20]{0} reshape(f32[1,20]{1,0} %dot.60), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6} | |
%p5.56 = f32[20]{0} parameter(5), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=82} | |
%constant.55 = f32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="<eval_with_key>.1" source_line=6} | |
%broadcast.62 = f32[20]{0} broadcast(f32[] %constant.55), dimensions={}, metadata={op_type="aten__add" op_name="aten__add.8/aten__add" source_file="<eval_with_key>.1" source_line=6} | |
%multiply.63 = f32[20]{0} multiply(f32[20]{0} %p5.56, f32[20]{0} %broadcast.62), metadata={op_type="aten__add" op_name="aten__add.8/aten__add" source_file="<eval_with_key>.1" source_line=6} | |
%add.64 = f32[20]{0} add(f32[20]{0} %reshape.61, f32[20]{0} %multiply.63), metadata={op_type="aten__add" op_name="aten__add.8/aten__add" source_file="<eval_with_key>.1" source_line=6} | |
%reshape.65 = f32[1,20]{1,0} reshape(f32[20]{0} %add.64), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6} | |
%reshape.66 = f32[20]{0} reshape(f32[1,20]{1,0} %reshape.65), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.1" source_line=6} | |
ROOT %tuple.67 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) tuple(s32[1]{0} %p0.42, s32[1]{0} %add.48, s32[1]{0} %p2.45, s32[1]{0} %add.53, f32[10]{0} %p4.54, /*index=5*/f32[20]{0} %reshape.66, f32[20,10]{0,1} %p6.57, f32[20]{0} %p5.56) | |
} | |
%PyLoweringContext.28.68 (in.69: (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20])) -> (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20]) { | |
%in.69 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) parameter(0) | |
%get-tuple-element.70 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=0 | |
%get-tuple-element.71 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=1 | |
%get-tuple-element.72 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=2 | |
%get-tuple-element.73 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=3 | |
%get-tuple-element.74 = f32[10]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=4 | |
%get-tuple-element.75 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=5 | |
%get-tuple-element.76 = f32[20,10]{0,1} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=6 | |
%get-tuple-element.77 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.69), index=7 | |
ROOT %call.78 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) call(s32[1]{0} %get-tuple-element.70, s32[1]{0} %get-tuple-element.71, s32[1]{0} %get-tuple-element.72, s32[1]{0} %get-tuple-element.73, f32[10]{0} %get-tuple-element.74, /*index=5*/f32[20]{0} %get-tuple-element.75, f32[20,10]{0,1} %get-tuple-element.76, f32[20]{0} %get-tuple-element.77), to_apply=%PyLoweringContext.40 | |
} | |
%PyLoweringContext.79 (p0.86: s32[1], p1.88: s32[1], UnusedArgumentsPlaceholder.80: s32[1], UnusedArgumentsPlaceholder.81: s32[1], UnusedArgumentsPlaceholder.82: f32[10], UnusedArgumentsPlaceholder.83: f32[20], UnusedArgumentsPlaceholder.84: f32[20,10], UnusedArgumentsPlaceholder.85: f32[20]) -> pred[] { | |
%UnusedArgumentsPlaceholder.80 = s32[1]{0} parameter(2) | |
%UnusedArgumentsPlaceholder.81 = s32[1]{0} parameter(3) | |
%UnusedArgumentsPlaceholder.82 = f32[10]{0} parameter(4) | |
%UnusedArgumentsPlaceholder.83 = f32[20]{0} parameter(5) | |
%UnusedArgumentsPlaceholder.84 = f32[20,10]{0,1} parameter(6) | |
%UnusedArgumentsPlaceholder.85 = f32[20]{0} parameter(7) | |
%p1.88 = s32[1]{0} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76} | |
%reshape.89 = s32[] reshape(s32[1]{0} %p1.88), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.0" source_line=5} | |
%p0.86 = s32[1]{0} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=76} | |
%reshape.87 = s32[] reshape(s32[1]{0} %p0.86), metadata={op_type="aten__as_strided" op_name="aten__as_strided" source_file="<eval_with_key>.0" source_line=6} | |
ROOT %compare.90 = pred[] compare(s32[] %reshape.89, s32[] %reshape.87), direction=LT, metadata={op_type="aten__lt" op_name="aten__lt" source_file="<eval_with_key>.0" source_line=7} | |
} | |
%PyLoweringContext.12.91 (in.92: (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20])) -> pred[] { | |
%in.92 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) parameter(0) | |
%get-tuple-element.93 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=0 | |
%get-tuple-element.94 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=1 | |
%get-tuple-element.95 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=2 | |
%get-tuple-element.96 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=3 | |
%get-tuple-element.97 = f32[10]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=4 | |
%get-tuple-element.98 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=5 | |
%get-tuple-element.99 = f32[20,10]{0,1} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=6 | |
%get-tuple-element.100 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{0,1}, f32[20]{0}) %in.92), index=7 | |
ROOT %call.101 = pred[] call(s32[1]{0} %get-tuple-element.93, s32[1]{0} %get-tuple-element.94, s32[1]{0} %get-tuple-element.95, s32[1]{0} %get-tuple-element.96, f32[10]{0} %get-tuple-element.97, /*index=5*/f32[20]{0} %get-tuple-element.98, f32[20,10]{0,1} %get-tuple-element.99, f32[20]{0} %get-tuple-element.100), to_apply=%PyLoweringContext.79 | |
} | |
%test_while.102 (p0.103: s32[1], p1.104: s32[1], p2.105: s32[1], p3.106: s32[1], p4.107: f32[10], p5.108: f32[20], p6.109: f32[20], p7.110: f32[20,10]) -> (s32[1], s32[1], s32[1], s32[1], f32[10], /*index=5*/f32[20], f32[20,10], f32[20]) { | |
%p0.103 = s32[1]{0} parameter(0) | |
%p1.104 = s32[1]{0} parameter(1) | |
%p2.105 = s32[1]{0} parameter(2) | |
%p3.106 = s32[1]{0} parameter(3) | |
%p4.107 = f32[10]{0} parameter(4) | |
%p6.109 = f32[20]{0} parameter(6) | |
%p7.110 = f32[20,10]{1,0} parameter(7) | |
%p5.108 = f32[20]{0} parameter(5) | |
%tuple.111 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) tuple(s32[1]{0} %p0.103, s32[1]{0} %p1.104, s32[1]{0} %p2.105, s32[1]{0} %p3.106, f32[10]{0} %p4.107, /*index=5*/f32[20]{0} %p6.109, f32[20,10]{1,0} %p7.110, f32[20]{0} %p5.108) | |
ROOT %while.112 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) while((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %tuple.111), condition=%PyLoweringContext.12.91, body=%PyLoweringContext.28.68 | |
} | |
ENTRY %IrToHlo.130 (p0.2: f32[20,10], p1.3: f32[20], p2.9: s64[], p3.36: s32[1], p4.37: s32[1], p5.38: s32[1], p6.39: s32[1]) -> (f32[20]) { | |
%constant.12 = s64[] constant(2531011), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%constant.10 = s64[] constant(214013), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%p2.9 = s64[] parameter(2), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%multiply.11 = s64[] multiply(s64[] %constant.10, s64[] %p2.9), metadata={op_type="aten__mul" op_name="aten__mul" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%add.13 = s64[] add(s64[] %constant.12, s64[] %multiply.11), metadata={op_type="aten__add" op_name="aten__add" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%convert.16 = u64[] convert(s64[] %add.13), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%reshape.18 = u64[1]{0} reshape(u64[] %convert.16), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%constant.17 = u64[] constant(0), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%reshape.19 = u64[1]{0} reshape(u64[] %constant.17), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%concatenate.20 = u64[2]{0} concatenate(u64[1]{0} %reshape.18, u64[1]{0} %reshape.19), dimensions={0}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%rng-bit-generator.21 = (u64[2]{0}, u32[10]{0}) rng-bit-generator(u64[2]{0} %concatenate.20), algorithm=rng_default, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%get-tuple-element.23 = u64[2]{0} get-tuple-element((u64[2]{0}, u32[10]{0}) %rng-bit-generator.21), index=0, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%p6.39 = s32[1]{0} parameter(6), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=43} | |
%p5.38 = s32[1]{0} parameter(5), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=44} | |
%p4.37 = s32[1]{0} parameter(4), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=45} | |
%p3.36 = s32[1]{0} parameter(3), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=46} | |
%get-tuple-element.22 = u32[10]{0} get-tuple-element((u64[2]{0}, u32[10]{0}) %rng-bit-generator.21), index=1, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%constant.24 = u32[] constant(9), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%broadcast.25 = u32[10]{0} broadcast(u32[] %constant.24), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%shift-right-logical.26 = u32[10]{0} shift-right-logical(u32[10]{0} %get-tuple-element.22, u32[10]{0} %broadcast.25), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%convert.27 = f32[10]{0} convert(u32[10]{0} %shift-right-logical.26), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%constant.28 = f32[] constant(1.1920929e-07), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%broadcast.29 = f32[10]{0} broadcast(f32[] %constant.28), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%multiply.30 = f32[10]{0} multiply(f32[10]{0} %convert.27, f32[10]{0} %broadcast.29), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%constant.14 = f32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%constant.15 = f32[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%subtract.31 = f32[] subtract(f32[] %constant.14, f32[] %constant.15), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%broadcast.32 = f32[10]{0} broadcast(f32[] %subtract.31), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%multiply.33 = f32[10]{0} multiply(f32[10]{0} %multiply.30, f32[10]{0} %broadcast.32), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%broadcast.34 = f32[10]{0} broadcast(f32[] %constant.15), dimensions={}, metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%add.35 = f32[10]{0} add(f32[10]{0} %multiply.33, f32[10]{0} %broadcast.34), metadata={op_type="aten__uniform" op_name="aten__uniform" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=47} | |
%constant.4 = f32[] constant(0), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48} | |
%reshape.5 = f32[1]{0} reshape(f32[] %constant.4), metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48} | |
%broadcast.6 = f32[1]{0} broadcast(f32[1]{0} %reshape.5), dimensions={0}, metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48} | |
%reshape.7 = f32[] reshape(f32[1]{0} %broadcast.6), metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48} | |
%broadcast.8 = f32[20]{0} broadcast(f32[] %reshape.7), dimensions={}, metadata={op_type="aten__expand" op_name="aten__expand" source_file="/root/pytorch/xla/test/test_test_fori_loop.py" source_line=48} | |
%p1.3 = f32[20]{0} parameter(1), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/torch/nn/modules/module.py" source_line=1159} | |
%p0.2 = f32[20,10]{0,1} parameter(0), metadata={op_type="xla__device_data" op_name="xla__device_data" source_file="/root/pytorch/torch/nn/modules/module.py" source_line=1159} | |
%call.113 = (s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) call(s32[1]{0} %p6.39, s32[1]{0} %p5.38, s32[1]{0} %p4.37, s32[1]{0} %p3.36, f32[10]{0} %add.35, /*index=5*/f32[20]{0} %broadcast.8, f32[20]{0} %p1.3, f32[20,10]{0,1} %p0.2), to_apply=%test_while.102, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.114 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=0, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.115 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=1, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.116 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=2, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.117 = s32[1]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=3, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.118 = f32[10]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=4, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%get-tuple-element.119 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=5, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%reshape.123 = f32[1,10]{1,0} reshape(f32[10]{0} %add.35), metadata={op_type="aten__view" op_name="aten__view" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
%get-tuple-element.120 = f32[20,10]{1,0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=6, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%transpose.122 = f32[10,20]{0,1} transpose(f32[20,10]{1,0} %get-tuple-element.120), dimensions={1,0}, metadata={op_type="aten__permute" op_name="aten__permute" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
%dot.124 = f32[1,20]{1,0} dot(f32[1,10]{1,0} %reshape.123, f32[10,20]{0,1} %transpose.122), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="aten__mm" op_name="aten__mm" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
%reshape.125 = f32[20]{0} reshape(f32[1,20]{1,0} %dot.124), metadata={op_type="aten__view" op_name="aten__view" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
%get-tuple-element.121 = f32[20]{0} get-tuple-element((s32[1]{0}, s32[1]{0}, s32[1]{0}, s32[1]{0}, f32[10]{0}, /*index=5*/f32[20]{0}, f32[20,10]{1,0}, f32[20]{0}) %call.113), index=7, metadata={op_type="xla___op_test_while" op_name="xla___op_test_while" source_file="/root/pytorch/xla/torch_xla/experimental/fori_loop.py" source_line=152} | |
%constant.1 = f32[] constant(1), metadata={op_type="prim__Constant" op_name="prim__Constant" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
%broadcast.126 = f32[20]{0} broadcast(f32[] %constant.1), dimensions={}, metadata={op_type="aten__add" op_name="aten__add.62/aten__add" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
%multiply.127 = f32[20]{0} multiply(f32[20]{0} %get-tuple-element.121, f32[20]{0} %broadcast.126), metadata={op_type="aten__add" op_name="aten__add.62/aten__add" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
%add.128 = f32[20]{0} add(f32[20]{0} %reshape.125, f32[20]{0} %multiply.127), metadata={op_type="aten__add" op_name="aten__add.62/aten__add" source_file="/root/pytorch/torch/nn/modules/linear.py" source_line=116} | |
ROOT %tuple.129 = (f32[20]{0}) tuple(f32[20]{0} %add.128) | |
} | |
Graph Hash: ddfe4f6f6877d38fcadd799f1f67cc2c | |
## END_GRAPH | |
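
This second module lowers the same program, and its structure is easier to read: %test_while.102 packs the eight loop-carried values into %tuple.111 and emits a single `while` op whose condition (%PyLoweringContext.12.91, an iterator < upper-bound compare) and body (%PyLoweringContext.28.68, two counter increments plus y = x @ W^T + b) are the computations defined above it. The following is a pure-Python sketch of that control flow, reconstructed only from the shapes and ops in the dump; all variable names are guesses, not taken from the test file, and the s32[1] counters are plain int64 tensors here for simplicity.

import torch

def cond_fn(upper, it, one, i, x, y, weight, bias):
    # %PyLoweringContext.79: compare(it, upper), direction=LT
    return (it < upper).item()

def body_fn(upper, it, one, i, x, y, weight, bias):
    # %PyLoweringContext.40: bump both counters, recompute y = x @ W^T + b
    return upper, it + one, one, i + one, x, x @ weight.t() + bias, weight, bias

carried = (torch.tensor([10]), torch.tensor([0]), torch.tensor([1]), torch.tensor([0]),
           torch.rand(10), torch.zeros(20), torch.rand(20, 10), torch.rand(20))
while cond_fn(*carried):
    carried = body_fn(*carried)

Note the difference between the two ROOTs: the first graph returns the loop's carried f32[20] directly (%get-tuple-element.118), while this one feeds the loop's weight and bias outputs through one more linear step (%dot.124, %add.128) before returning, consistent with the tensor print at test_test_fori_loop.py line 63 (visible in the TensorsGraphInfo frames above) that triggered this second sync.
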
(torch310) root@t1v-n-0b41c02e-w-0:~/pytorch/xla# rm -rf /tmp/