@ezyang
Created October 31, 2022 03:51
Sweep logs for symbolic-shapes --accuracy --backend inductor (TORCHDYNAMO_DYNAMIC_SHAPES=1) - Sun Oct 30 17:53:58 PDT 2022
This file has been truncated.
Running BERT_pytorch...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/bert.py", line 43, in forward
x = self.embedding(x, segment_info)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/bert.py", line 32, in forward
x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
File "/data/users/ezyang/benchmark/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/bert.py", line 32, in <graph break in forward>
x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of following error
cuda eval BERT_pytorch FAIL
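The bare "assert not needed" above is Inductor's wrapper codegen checking that every size symbol referenced by the generated code can be derived from the graph inputs. A purely illustrative Python sketch of that invariant follows; the names needed and derivable are hypothetical stand-ins, not the actual fields in sizevars.py:

    # Illustrative sketch only: mirrors the shape of the check at sizevars.py:481,
    # not the real Inductor implementation. With dynamic shapes enabled, a size
    # symbol that codegen cannot bind from the graph inputs trips the assert.
    needed = {"s0", "s1"}      # size symbols the generated wrapper refers to
    derivable = {"s0"}         # symbols codegen managed to bind from inputs

    needed -= derivable
    assert not needed         # fires as a bare AssertionError, as in the log above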
Running Background_Matting...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 256.000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 256.000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 256.000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_108,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'mod': <class 'torchbenchmark.models.Background_Matting.networks.ResnetConditionHR'>, 'mod_model_al_out': <class 'torch.nn.modules.container.Sequential'>, 'mod_model_al_out_0': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Background_Matting/networks.py", line 104, in forward
al_out=self.model_al_out(out_dec_al)
| File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 256.000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 256.000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 328, in forward_pass
def forward_pass(self, mod, inputs, collect_outputs=True):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 256.000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 256.000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 256.000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_108,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'mod': <class 'torchbenchmark.models.Background_Matting.networks.ResnetConditionHR'>, 'mod_model_al_out': <class 'torch.nn.modules.container.Sequential'>, 'mod_model_al_out_0': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Background_Matting/networks.py", line 104, in forward
al_out=self.model_al_out(out_dec_al)
| File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
TorchDynamo optimized model failed to run because of following error
cuda eval Background_Matting FAIL
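A minimal sketch of the arange failure above, assuming (as the 256.000000000000 repr suggests) that the value reaching the fallback kernel is a sympy.Float rather than a plain Python number. The snippet is an illustration of the schema mismatch, not the actual lowering path; the first call is expected to succeed and the second to fail:

    import sympy
    import torch

    # A plain Python float is a valid Scalar for aten::arange(Scalar end, ...).
    torch.ops.aten.arange.default(256.0, dtype=torch.float32)

    # A sympy.Float is not a "number" to the schema matcher, so every arange
    # overload is rejected, mirroring the "Cannot cast ... to number" error.
    try:
        torch.ops.aten.arange.default(sympy.Float(256.0), dtype=torch.float32)
    except RuntimeError as exc:
        print(exc)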
Running DALLE2_pytorch...
WARNING:common:fp64 golden ref were not generated for DALLE2_pytorch
[2022-10-30 17:55:36,410] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function eq>
args[0]: s0
args[1]: 1
ERROR:common:AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: s0
args[1]: 1
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size, 1), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'bool' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/dalle2_pytorch/dalle2_pytorch.py", line 96, in inner
model.eval()
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/dalle2_pytorch/dalle2_pytorch.py", line 97, in <graph break in inner>
out = fn(model, *args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/dalle2_pytorch/dalle2_pytorch.py", line 3231, in forward
@torch.no_grad()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: s0
args[1]: 1
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size, 1), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval DALLE2_pytorch FAIL
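The DALLE2_pytorch failure is different: Inductor created an implicit fallback for the built-in operator.eq on a symbolic size (see the "Creating implicit fallback" warning above), ran it to get an example output, and then read .device off the result in ir.py. A minimal sketch with plain Python stand-ins (not the real FallbackKernel code) of why that breaks:

    import operator

    example_output = operator.eq(1, 1)  # stands in for eq(%sym_size, 1) -> a plain bool
    try:
        example_output.device           # what the fallback does with its example output
    except AttributeError as exc:
        print(exc)                      # 'bool' object has no attribute 'device'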
Running LearningToPaint...
cuda eval LearningToPaint PASS
Running Super_SloMo...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 22.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 22.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 22.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_12,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'mod': <class 'torchbenchmark.models.Super_SloMo.model_wrapper.Model'>, 'mod_flowComp': <class 'torchbenchmark.models.Super_SloMo.slomo_model.UNet'>, 'mod_flowComp_up1': <class 'torchbenchmark.models.Super_SloMo.slomo_model.up'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 130, in forward
x = F.interpolate(x, scale_factor=2., mode='bilinear')
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 204, in forward
x = self.up1(x, s5)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/model_wrapper.py", line 28, in forward
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
| File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 22.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 22.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 328, in forward_pass
def forward_pass(self, mod, inputs, collect_outputs=True):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 22.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 22.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 22.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_12,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'mod': <class 'torchbenchmark.models.Super_SloMo.model_wrapper.Model'>, 'mod_flowComp': <class 'torchbenchmark.models.Super_SloMo.slomo_model.UNet'>, 'mod_flowComp_up1': <class 'torchbenchmark.models.Super_SloMo.slomo_model.up'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 130, in forward
x = F.interpolate(x, scale_factor=2., mode='bilinear')
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 204, in forward
x = self.up1(x, s5)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/model_wrapper.py", line 28, in forward
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
| File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
TorchDynamo optimized model failed to run because of following error
cuda eval Super_SloMo FAIL
Running alexnet...
cuda eval alexnet PASS
Running attention_is_all_you_need_pytorch...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 171, in forward
enc_output, *_ = self.encoder(src_seq, src_mask)
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 172, in <graph break in forward>
dec_output, *_ = self.decoder(trg_seq, trg_mask, enc_output, src_mask)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 106, in forward
dec_output = self.dropout(self.position_enc(self.trg_word_emb(trg_seq)))
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 106, in <graph break in forward>
dec_output = self.dropout(self.position_enc(self.trg_word_emb(trg_seq)))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of following error
cuda eval attention_is_all_you_need_pytorch FAIL
Running dcgan...
cuda eval dcgan PASS
Running densenet121...
cuda eval densenet121 PASS
Running detectron2_fasterrcnn_r_101_c4...
[2022-10-30 18:02:10,933] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:10,933] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:12,471] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:12,471] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:13,080] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:13,080] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:14,087] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:14,087] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:14,982] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:14,982] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:15,365] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:15,365] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:15,591] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:15,591] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:16,482] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:16,482] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:16,849] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:16,849] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:17,080] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:17,080] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:18,575] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:18,575] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:18,941] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:18,941] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:20,685] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:20,685] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:21,078] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:21,078] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:21,321] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:21,322] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:22,140] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:22,140] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:22,530] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:22,530] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:22,781] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:22,782] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:23,612] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:23,612] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:24,015] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:24,015] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:24,259] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:24,259] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:25,608] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:25,608] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:25,978] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:25,979] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:27,731] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:27,731] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:28,094] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:28,094] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:28,318] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:28,318] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:28,829] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:28,829] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:29,451] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:29,451] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:29,674] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:29,674] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:30,177] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:30,177] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:30,525] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:30,525] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:30,749] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:30,749] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:31,544] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:31,544] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:31,910] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:31,910] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:32,140] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:32,140] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:32,656] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:32,656] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:33,308] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:33,308] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:33,534] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:33,534] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:34,054] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:34,054] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:34,420] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:34,421] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:34,643] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:34,643] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:35,445] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:35,446] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:35,797] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:35,797] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:36,022] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:36,022] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:36,543] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:36,543] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:37,198] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:37,199] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:37,429] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:37,429] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:37,958] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:37,958] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:38,334] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:38,334] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:38,562] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:38,563] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:39,371] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:39,372] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:39,732] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:39,732] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:39,955] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:39,956] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:40,470] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:40,470] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:41,118] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:41,118] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:41,336] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:41,336] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:41,859] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:41,859] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:42,221] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:42,221] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:42,444] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:42,444] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:42,888] torch._dynamo.convert_frame: [WARNING] torch._dynamo hit config.cache_size_limit (64)
function: 'forward' (/data/users/ezyang/detectron2/detectron2/layers/batch_norm.py:44)
reasons: ['___check_obj_id(self, 139733266684656)']
to diagnose recompilation issues, see https://github.com/pytorch/torchdynamo/blob/main/TROUBLESHOOTING.md.
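On the cache-size warning just above: torch._dynamo stops recompiling a frame once it has accumulated config.cache_size_limit (64 here) guard-failing variants, and the ___check_obj_id guard suggests the same batch_norm forward is being traced for many distinct module instances. A hedged sketch of the usual knobs, assuming the late-2022 torch._dynamo API shown in this log:

    import torch._dynamo

    torch._dynamo.config.cache_size_limit = 128  # default was 64 when this sweep ran
    torch._dynamo.reset()                        # drop previously compiled variants

Whether raising the limit actually helps depends on why the guards keep failing in the first place.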
[2022-10-30 18:02:43,050] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:43,050] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:43,210] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:43,210] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:43,232] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:43,232] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:43,536] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:43,536] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:43,982] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:43,982] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:44,005] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:44,005] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:44,305] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:44,305] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:44,461] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:44,461] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:44,483] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:44,483] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:44,774] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:44,774] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:45,217] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:45,217] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:45,239] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:45,239] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:45,531] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:45,531] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:45,679] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:45,679] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:45,700] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:45,700] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:46,278] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:46,278] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:46,433] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:46,433] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:46,454] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:46,454] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:46,746] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:46,746] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:46,903] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:46,903] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:46,925] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:46,925] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:47,514] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:47,514] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:47,665] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:47,665] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:47,686] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:47,686] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:47,984] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:47,984] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:48,144] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:48,144] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:48,166] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:48,166] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:48,746] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:48,746] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:48,906] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:48,906] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:02:48,928] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:02:48,929] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running <function meshgrid at 0x7f15bb111ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7f15bb111ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_fasterrcnn_r_101_c4 FAIL
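This meshgrid failure is not detectron2-specific: under dynamic shapes the anchor generator's per-feature-map sizes become symbolic (the s2/s3 in the FakeTensor sizes above), and tracing torch.meshgrid over such tensors hits the numel() restriction. A standalone sketch along those lines follows, assuming torch._dynamo.config.dynamic_shapes is the in-process equivalent of TORCHDYNAMO_DYNAMIC_SHAPES=1 in this build; the shift ranges are made up.

    import torch
    import torch._dynamo as dynamo

    # Assumption: mirrors TORCHDYNAMO_DYNAMIC_SHAPES=1 from the sweep invocation.
    dynamo.config.dynamic_shapes = True

    def anchor_shifts(shifts_x, shifts_y):
        # detectron2's anchor generator does essentially this per feature map.
        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
        return shift_x.reshape(-1), shift_y.reshape(-1)

    opt_fn = dynamo.optimize("inductor")(anchor_shifts)
    # With symbolic sizes for the two 1-D inputs, tracing this call is where
    # "Cannot call numel() on tensor with symbolic sizes/strides" shows up.
    opt_fn(torch.arange(0, 128, 16.0, device="cuda"),
           torch.arange(0, 96, 16.0, device="cuda"))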
Running detectron2_fasterrcnn_r_101_dc5...
[2022-10-30 18:03:10,309] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:03:10,309] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[... the same warning pair repeats 58 more times through 18:03:40,908; identical lines elided ...]
[2022-10-30 18:03:41,359] torch._dynamo.convert_frame: [WARNING] torch._dynamo hit config.cache_size_limit (64)
function: 'forward' (/data/users/ezyang/detectron2/detectron2/layers/batch_norm.py:44)
reasons: ['___check_obj_id(self, 140281099975600)']
to diagnose recompilation issues, see https://github.com/pytorch/torchdynamo/blob/main/TROUBLESHOOTING.md.
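The recompile limit is hit because the ___check_obj_id(self, ...) guard specializes FrozenBatchNorm2d.forward per module instance until the 64-entry cache fills. When triaging, one option is to raise the limit; a minimal sketch, assuming the default of 64 shown above:

    import torch._dynamo as dynamo

    # Raising the limit only hides the recompilation churn; the per-instance
    # ___check_obj_id guards on FrozenBatchNorm2d.forward are still generated.
    dynamo.config.cache_size_limit = 256  # default hit above was 64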
[2022-10-30 18:03:41,532] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:03:41,532] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[... the same warning pair repeats 37 more times through 18:03:50,211; identical lines elided ...]
ERROR:common:Failed running <function meshgrid at 0x7f95411a0ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7f95411a0ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_fasterrcnn_r_101_dc5 FAIL
Running detectron2_fasterrcnn_r_101_fpn...
[2022-10-30 18:04:08,304] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:08,304] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:09,691] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:09,691] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:10,036] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:10,036] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:10,988] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:10,988] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:11,770] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:11,770] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:12,127] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:12,127] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:12,340] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:12,341] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:13,125] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:13,125] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:13,470] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:13,470] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:13,681] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:13,681] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:14,883] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:14,883] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:15,230] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:15,230] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:16,637] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:16,637] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:16,971] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:16,971] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:17,186] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:17,186] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:17,830] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:17,830] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:18,196] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:18,197] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:18,420] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:18,420] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:19,038] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:19,038] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:19,377] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:19,377] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:19,586] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:19,586] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:20,744] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:20,744] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:21,093] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:21,093] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:22,544] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:22,545] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:22,885] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:22,886] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:23,110] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:23,110] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:23,750] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:23,750] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:24,109] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:24,109] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:24,323] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:24,323] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:24,813] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:24,813] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:25,350] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:25,350] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:25,579] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:25,580] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:26,079] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:26,079] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:26,425] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:26,425] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:26,636] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:04:26,636] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:04:27,293 ... 18:04:36,458] (the two WARNING lines above repeat 24 more times, identical apart from their timestamps)
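For context on the warning pair above: the message comes from the mutation check in torch._dynamo.optimizations.training, which declines to hand a captured graph to AOT Autograd when the graph mutates a tensor, so the inductor backend falls back to eager for that subgraph. A minimal sketch of the kind of pattern that check rejects (hypothetical code with made-up shapes, not taken from detectron2, and not verified against this exact commit):

    import torch
    import torch._dynamo as dynamo

    def f(x):
        x.add_(1)        # in-place mutation of a graph input
        return x * 2

    # Compiling this through the same backend used in the sweep should print the
    # same "presence of mutation" / "falling back to eager" WARNING pair for the
    # captured subgraph.
    dynamo.optimize("inductor")(f)(torch.ones(8, device="cuda"))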
[2022-10-30 18:04:36,604] torch._dynamo.convert_frame: [WARNING] torch._dynamo hit config.cache_size_limit (64)
function: 'forward' (/data/users/ezyang/detectron2/detectron2/layers/batch_norm.py:44)
reasons: ['___check_obj_id(self, 140569429001312)']
to diagnose recompilation issues, see https://github.com/pytorch/torchdynamo/blob/main/TROUBLESHOOTING.md.
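If the recompilations of detectron2's batch_norm forward flagged here are expected (one specialization per module instance, as the ___check_obj_id guard suggests), the limit named in the warning can be raised before the run. A minimal sketch; the value 256 is an arbitrary illustration, not a setting used in this sweep:

    import torch._dynamo
    # config.cache_size_limit defaults to 64, per the warning above
    torch._dynamo.config.cache_size_limit = 256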
[2022-10-30 18:04:36,758 ... 18:04:44,405] (the "Unable to use Aot Autograd because of presence of mutation" / "falling back to eager" WARNING pair repeats 38 times, identical apart from their timestamps)
ERROR:common:Failed running <function meshgrid at 0x7fd86a115ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7fd86a115ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_fasterrcnn_r_101_fpn FAIL
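This detectron2 failure (and the identical ones for the other fasterrcnn variants below) stops at the same spot: while dynamo traces the <graph break in inference> frame of rcnn.py it reaches torch.functional.meshgrid with fake tensors whose 1-D sizes are symbolic ((s2,) and (s3,)), and fake-tensor propagation raises "Cannot call numel() on tensor with symbolic sizes/strides". A standalone sketch of what a repro might look like under the same TORCHDYNAMO_DYNAMIC_SHAPES=1 setting as this sweep; the sizes, the device, and the assumption that plain 1-D inputs get symbolic sizes here are illustrative and not verified against this commit:

    import torch
    import torch._dynamo as dynamo

    def grid(xs, ys):
        # same call path as torch/functional.py:489 in the traceback above
        return torch.meshgrid(xs, ys, indexing="ij")

    compiled = dynamo.optimize("inductor")(grid)
    # with dynamic shapes on, the 1-D inputs should be traced with symbolic sizes,
    # which is the situation the FakeTensor sizes (s2,), (s3,) above describe
    compiled(torch.arange(5, device="cuda"), torch.arange(7, device="cuda"))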
Running detectron2_fasterrcnn_r_50_c4...
[2022-10-30 18:05:12,151] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:05:12,151] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:05:13,364 ... 18:05:32,393] (the two WARNING lines above repeat 37 more times, identical apart from their timestamps)
ERROR:common:Failed running <function meshgrid at 0x7f19157e7ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
[traceback identical, frame for frame, to the detectron2_fasterrcnn_r_101_fpn failure above: dynamo traces the <graph break in inference> frame of rcnn.py, inlines down to torch.functional.meshgrid, and fails fake-tensor propagation in _run_node]
RuntimeError: Failed running <function meshgrid at 0x7f19157e7ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_fasterrcnn_r_50_c4 FAIL
Running detectron2_fasterrcnn_r_50_dc5...
[2022-10-30 18:05:52,660] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:05:52,660] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:05:53,820 ... 18:06:16,935] (the two WARNING lines above repeat 45 more times, identical apart from their timestamps)
ERROR:common:Failed running <function meshgrid at 0x7f5a8fa0cee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7f5a8fa0cee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of the following error
cuda eval detectron2_fasterrcnn_r_50_dc5 FAIL
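The failure above reduces to torch.meshgrid being traced with FakeTensors whose sizes are symbolic (s2, s3): the _VF.meshgrid path calls numel(), which is rejected for symbolically sized tensors. A minimal sketch of the failing pattern, with illustrative names and sizes rather than the actual detectron2 call site:

import torch

# Two 1-D tensors whose lengths become symbolic (s2, s3) under dynamic-shapes
# tracing; in plain eager mode this runs fine.
shifts_x = torch.arange(0, 64, 8, dtype=torch.float32)
shifts_y = torch.arange(0, 48, 8, dtype=torch.float32)

# Under the fake-tensor trace this call ends up in _VF.meshgrid, whose numel()
# call raises the RuntimeError captured in the log.
grid_y, grid_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")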
Running detectron2_fasterrcnn_r_50_fpn...
[2022-10-30 18:06:34,752] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:34,752] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:36,098] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:36,098] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:36,695] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:36,695] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:37,614] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:37,614] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:38,564] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:38,564] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:38,958] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:38,958] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:39,191] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:39,191] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:40,101] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:40,102] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:40,473] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:40,473] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:40,699] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:40,699] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:42,098] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:42,098] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:42,468] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:42,468] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:44,091] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:44,091] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:44,461] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:44,461] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:44,702] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:44,702] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:45,506] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:45,506] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:45,890] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:45,891] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:46,124] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:46,124] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:46,827] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:46,827] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:47,189] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:47,189] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:47,411] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:47,411] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:48,673] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:48,674] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:49,032] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:49,032] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:50,582] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:50,582] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:50,934] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:50,934] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:51,157] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:51,157] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:51,868] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:51,868] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:52,238] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:52,238] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:52,462] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:52,462] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:52,968] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:52,969] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:53,331] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:53,331] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:53,552] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:53,552] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:54,266] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:54,266] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:54,635] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:54,635] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:54,860] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:54,860] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:55,593] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:55,593] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:55,954] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:55,954] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:56,178] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:56,178] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:57,535] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:57,536] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:57,907] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:57,907] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:06:59,606] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:06:59,606] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:00,109] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:00,109] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:00,335] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:00,335] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:00,838] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:00,838] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:01,440] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:01,440] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:01,666] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:01,667] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running <function meshgrid at 0x7f2d1683bee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7f2d1683bee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of the following error
cuda eval detectron2_fasterrcnn_r_50_fpn FAIL
Running detectron2_fcos_r_50_fpn...
[2022-10-30 18:07:26,503] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:26,504] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:27,689] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:27,689] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:28,070] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:28,071] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:29,154] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:29,154] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:29,997] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:29,997] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:30,375] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:30,375] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:30,604] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:30,604] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:31,449] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:31,449] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:31,824] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:31,824] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:32,055] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:32,055] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:33,228] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:33,228] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:33,604] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:33,605] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:35,168] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:35,168] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:35,538] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:35,538] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:35,767] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:35,767] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:36,466] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:36,466] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:36,837] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:36,838] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:37,065] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:37,065] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:37,588] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:37,588] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:38,132] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:38,133] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:38,361] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:38,361] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:39,297] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:39,298] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:39,845] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:39,845] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:41,241] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:41,241] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:41,792] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:41,792] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:42,021] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:42,022] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:42,546] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:42,546] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:42,916] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:42,916] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:43,147] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:43,147] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:43,855] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:43,855] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:44,230] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:44,230] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:44,455] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:44,455] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:44,971] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:44,971] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:45,529] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:45,530] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:45,758] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:45,758] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:46,279] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:46,280] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:46,648] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:46,648] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:46,882] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:46,882] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:48,039] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:48,039] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:48,409] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:48,409] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:49,985] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:49,985] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:50,362] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:50,362] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:50,591] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:50,591] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:51,315] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:51,315] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:51,684] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:51,684] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:07:51,911] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:07:51,911] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running <function meshgrid at 0x7f30f4a26ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/dense_detector.py", line 95, in forward
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/dense_detector.py", line 96, in <graph break in forward>
features = self.backbone(images.tensor)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/dense_detector.py", line 106, in <graph break in forward>
results = self.forward_inference(images, features, predictions)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7f30f4a26ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of the following error
cuda eval detectron2_fcos_r_50_fpn FAIL
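The detectron2 variants above all fail at the same torch.meshgrid call. For reference, meshgrid over 1-D inputs can be written with reshape + expand only; whether that formulation sidesteps the numel() call under symbolic shapes is an assumption, not something verified in this sweep:

import torch

def grid_like_meshgrid_ij(y: torch.Tensor, x: torch.Tensor):
    # Equivalent to torch.meshgrid(y, x, indexing="ij") for 1-D inputs,
    # expressed with reshape + expand instead of the _VF.meshgrid kernel.
    gy = y.reshape(-1, 1).expand(y.shape[0], x.shape[0])
    gx = x.reshape(1, -1).expand(y.shape[0], x.shape[0])
    return gy, gx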
Running detectron2_maskrcnn_r_101_c4...
[2022-10-30 18:09:05,575] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:05,575] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:06,784] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:06,785] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:07,306] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:07,306] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:08,166] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:08,166] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:08,963] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:08,963] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:09,324] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:09,324] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:09,535] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:09,535] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:10,344] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:10,344] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:10,696] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:10,696] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:10,913] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:10,914] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:12,221] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:12,221] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:12,583] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:12,583] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:14,128] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:14,129] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:14,482] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:14,482] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:14,696] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:14,696] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:15,392] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:15,392] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:15,755] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:15,755] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:15,982] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:15,982] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:16,706] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:16,706] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:17,081] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:17,081] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:17,311] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:17,311] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:18,583] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:18,583] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:18,943] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:18,944] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:20,513] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:20,513] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:20,866] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:20,866] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:21,089] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:21,089] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:21,596] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:21,596] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:22,167] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:22,168] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:22,387] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:22,387] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:22,892] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:22,892] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:23,264] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:23,265] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:23,483] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:23,483] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:24,199] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:24,200] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:24,547] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:24,547] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:24,775] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:24,775] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:25,303] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:25,303] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:25,875] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:25,876] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:26,098] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:26,098] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:26,598] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:26,598] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:26,944] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:26,944] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:27,164] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:27,165] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:27,891] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:27,891] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:28,253] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:28,253] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:28,469] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:28,469] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:28,969] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:28,969] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:29,540] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:29,540] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:29,762] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:29,762] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:30,263] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:30,263] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:30,609] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:30,609] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:30,826] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:30,826] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:31,561] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:31,561] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:31,917] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:31,917] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:32,142] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:32,142] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:32,664] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:32,664] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:33,276] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:33,276] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:33,496] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:33,496] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:33,992] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:33,993] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:34,348] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:34,348] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:34,559] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:34,559] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
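The paired warnings above all come from the same pre-flight check: when the FX subgraph Dynamo captured contains an in-place mutation, torch._dynamo.optimizations.training declines to hand it to AOT Autograd, and torch._inductor.compile_fx falls back to eager for that subgraph. A minimal sketch of the kind of mutation that can trip this check, assuming the torch._dynamo.optimize entry point this sweep uses; the module and buffer names are made up for illustration:

import torch
import torch._dynamo

class MutatingCounter(torch.nn.Module):
    # Hypothetical module: the in-place add_ on a registered buffer is the sort
    # of graph-level mutation the safety check rejects.
    def __init__(self):
        super().__init__()
        self.register_buffer("calls", torch.zeros(()))

    def forward(self, x):
        self.calls.add_(1)  # mutation recorded in the traced graph
        return x * self.calls

mod = MutatingCounter()

def run(x):
    return mod(x)

opt_run = torch._dynamo.optimize("inductor")(run)
opt_run(torch.randn(4))  # expected to log the same "falling back to eager" warning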
[2022-10-30 18:09:34,940] torch._dynamo.convert_frame: [WARNING] torch._dynamo hit config.cache_size_limit (64)
function: 'forward' (/data/users/ezyang/detectron2/detectron2/layers/batch_norm.py:44)
reasons: ['___check_obj_id(self, 140392471582752)']
to diagnose recompilation issues, see https://github.com/pytorch/torchdynamo/blob/main/TROUBLESHOOTING.md.
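The guard named in the reasons line is ___check_obj_id(self, ...), i.e. one cache entry per batch-norm module instance, so a ResNet backbone blows past the default limit of 64. One possible mitigation, at the cost of more compile time, is to raise the cap; the value below is an arbitrary assumption, not a tuned recommendation:

import torch._dynamo

# Same knob the warning refers to (config.cache_size_limit, default 64 here).
torch._dynamo.config.cache_size_limit = 256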
[2022-10-30 18:09:35,102] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:35,102] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:35,256] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:35,256] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:35,276] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:35,277] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:35,573] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:35,573] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:35,960] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:35,960] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:35,982] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:35,982] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:36,288] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:36,289] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:36,446] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:36,446] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:36,467] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:36,467] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:36,770] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:36,771] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:37,158] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:37,158] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:37,180] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:37,180] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:37,482] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:37,482] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:37,636] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:37,636] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:37,657] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:37,657] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:38,200] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:38,200] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:38,362] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:38,363] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:38,385] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:38,385] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:38,692] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:38,692] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:38,849] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:38,849] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:38,870] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:38,870] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:39,414] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:39,415] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:39,567] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:39,568] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:39,588] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:39,589] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:39,888] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:39,888] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:40,045] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:40,045] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:40,067] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:40,067] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:40,597] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:40,597] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:40,754] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:40,754] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:09:40,775] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:40,775] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running <function meshgrid at 0x7faf36c58ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7faf36c58ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_maskrcnn_r_101_c4 FAIL
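Both detectron2_maskrcnn_r_101_c4 above and the _fpn variant that follows die at the same spot: the anchor generator's torch.meshgrid call, where the two 1-D shift tensors carry symbolic sizes (s2, s3) and the fake-tensor path ends up calling numel() on them. A hedged, standalone sketch of the failing pattern, assuming dynamic shapes are enabled as in this sweep (TORCHDYNAMO_DYNAMIC_SHAPES=1) and the same commit; newer builds may not reproduce it:

import torch
import torch._dynamo

def make_grid(shift_x, shift_y):
    # Same shape of call as detectron2's anchor generator: two 1-D tensors
    # whose lengths become symbolic under dynamic shapes.
    ys, xs = torch.meshgrid(shift_y, shift_x, indexing="ij")
    return xs.reshape(-1), ys.reshape(-1)

opt = torch._dynamo.optimize("inductor")(make_grid)
opt(torch.arange(0.0, 64.0, 8.0), torch.arange(0.0, 48.0, 8.0))  # expected to hit the numel() error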
Running detectron2_maskrcnn_r_101_fpn...
[2022-10-30 18:09:59,706] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:09:59,706] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:01,068] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:01,068] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:01,665] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:01,665] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:02,594] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:02,594] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:03,515] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:03,515] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:03,876] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:03,877] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:04,108] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:04,108] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:05,010] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:05,011] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:05,380] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:05,380] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:05,607] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:05,607] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:06,999] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:06,999] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:07,375] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:07,375] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:09,013] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:09,014] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:09,386] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:09,386] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:09,613] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:09,613] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:10,389] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:10,389] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:10,759] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:10,760] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:10,994] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:10,994] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:11,780] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:11,780] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:12,169] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:12,169] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:12,407] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:12,408] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:13,806] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:13,806] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:14,195] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:14,195] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:15,877] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:15,877] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:16,253] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:16,253] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:16,479] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:16,479] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:17,002] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:17,002] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:17,648] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:17,648] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:17,874] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:17,874] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:18,392] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:18,392] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:18,760] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:18,761] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:18,986] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:18,987] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:19,787] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:19,787] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:20,153] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:20,153] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:20,374] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:20,374] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:20,891] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:20,891] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:21,613] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:21,614] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:21,848] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:21,849] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:22,386] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:22,386] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:22,762] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:22,762] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:22,988] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:22,988] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:23,841] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:23,842] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:24,214] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:24,214] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:24,437] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:24,438] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:24,964] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:24,964] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:25,676] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:25,677] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:25,905] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:25,905] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:26,423] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:26,423] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:26,790] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:26,790] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:27,019] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:27,019] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:27,896] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:27,896] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:28,271] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:28,271] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:28,497] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:28,497] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:29,030] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:29,030] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:29,744] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:29,744] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:29,973] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:29,973] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:30,488] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:30,488] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:30,854] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:30,855] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:31,082] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:31,082] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:31,237] torch._dynamo.convert_frame: [WARNING] torch._dynamo hit config.cache_size_limit (64)
function: 'forward' (/data/users/ezyang/detectron2/detectron2/layers/batch_norm.py:44)
reasons: ['___check_obj_id(self, 140596217870560)']
to diagnose recompilation issues, see https://github.com/pytorch/torchdynamo/blob/main/TROUBLESHOOTING.md.
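Same recompile cap, hit again by the FPN variant, and for the same reason: the guard is on the identity of self, so every batch-norm instance in the backbone occupies its own cache slot. A small helper, offered purely as an illustration (not part of the harness), to estimate how many slots that one forward would need for a given model:

import torch.nn as nn

def count_batchnorm_like(model: nn.Module) -> int:
    # Rough lower bound on Dynamo cache entries for batch_norm.forward when the
    # guard is ___check_obj_id(self, ...): one entry per module instance.
    return sum(1 for m in model.modules() if "BatchNorm" in type(m).__name__)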
[2022-10-30 18:10:31,748] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:31,748] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:31,912] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:31,912] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:31,934] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:31,934] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:32,252] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:32,252] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:32,420] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:32,420] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:32,444] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:32,444] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:33,127] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:33,128] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:33,298] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:33,298] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:33,323] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:33,323] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:33,635] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:33,635] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:33,796] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:33,796] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:33,818] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:33,819] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:34,468] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:34,469] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:34,631] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:34,631] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:34,653] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:34,654] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:34,963] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:34,964] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:35,458] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:35,458] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:35,481] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:35,482] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:35,788] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:35,788] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:35,948] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:35,948] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:35,969] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:35,970] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:36,625] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:36,626] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:36,787] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:36,787] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:36,809] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:36,809] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:37,123] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:37,123] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:37,286] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:37,287] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:37,308] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:37,309] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:37,974] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:37,975] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:38,142] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:38,142] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:38,164] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:38,165] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:39,158] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:39,158] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:39,320] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:39,320] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:39,778] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:39,778] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:40,287] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:40,287] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:40,310] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:40,311] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:40,615] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:40,615] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:40,784] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:40,784] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:10:40,808] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:10:40,809] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running <function meshgrid at 0x7fdea4e52ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7fdea4e52ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_maskrcnn_r_101_fpn FAIL
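All three detectron2 Mask R-CNN variants in this sweep fail the same way: with TORCHDYNAMO_DYNAMIC_SHAPES=1, Dynamo hands torch.meshgrid two fake 1-D tensors whose sizes are symbolic (s2, s3), and the eager meshgrid path calls numel(), which is not supported on tensors with symbolic sizes/strides. A minimal sketch of the triggering pattern, hypothetical and not taken from detectron2 itself (the real call sits in its anchor-grid code), assuming a CUDA build of roughly this commit:

    # Hypothetical repro sketch; expected to hit the same
    # "Cannot call numel() on tensor with symbolic sizes/strides" error
    # when run with TORCHDYNAMO_DYNAMIC_SHAPES=1.
    import torch
    import torch._dynamo as dynamo

    def make_grid(feat):
        h, w = feat.shape[-2:]                      # symbolic sizes under dynamic shapes
        ys = torch.arange(h, device=feat.device)    # fake tensor with size (s2,)
        xs = torch.arange(w, device=feat.device)    # fake tensor with size (s3,)
        gy, gx = torch.meshgrid(ys, xs, indexing="ij")
        return gy + gx

    opt_make_grid = dynamo.optimize("inductor")(make_grid)
    opt_make_grid(torch.randn(1, 3, 32, 48, device="cuda"))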
Running detectron2_maskrcnn_r_50_c4...
[2022-10-30 18:11:09,589] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:09,589] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:10,910] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:10,910] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:11,278] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:11,278] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:12,396] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:12,396] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:13,065] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:13,065] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:13,656] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:13,656] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:13,883] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:13,883] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:14,547] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:14,548] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:15,143] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:15,143] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:15,371] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:15,371] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:16,732] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:16,732] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:17,103] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:17,103] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:18,719] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:18,719] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:19,086] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:19,087] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:19,312] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:19,312] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:20,076] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:20,076] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:20,443] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:20,443] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:20,667] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:20,667] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:21,182] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:21,182] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:21,792] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:21,792] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:22,022] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:22,022] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:23,373] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:23,374] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:23,747] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:23,748] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:25,439] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:25,439] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:25,812] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:25,812] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:26,042] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:26,042] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:26,561] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:26,561] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:26,932] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:26,932] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:27,167] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:27,167] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:27,960] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:27,960] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:28,340] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:28,340] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:28,573] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:28,573] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:29,377] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:29,377] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:29,763] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:29,763] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:30,000] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:30,000] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:30,528] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:30,528] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:31,175] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:31,175] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:31,406] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:31,406] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running <function meshgrid at 0x7f1a56da4ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7f1a56da4ee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_maskrcnn_r_50_c4 FAIL
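The wall of "presence of mutation" warnings above is AOT Autograd's safety check declining captured subgraphs that mutate tensors in place; inductor then falls back to eager for those subgraphs before the run ultimately dies on the same meshgrid error. A hypothetical sketch of the kind of graph that trips the check (not detectron2's actual code):

    # Hypothetical sketch of a captured graph containing mutation;
    # expected to take the "falling back to eager" path logged above.
    import torch
    import torch._dynamo as dynamo

    def fused_block(x, residual):
        out = torch.nn.functional.relu(x, inplace=True)  # in-place op in the graph
        residual.add_(out)                               # mutates a graph input
        return residual

    opt_block = dynamo.optimize("inductor")(fused_block)
    x = torch.randn(8, 16, device="cuda")
    res = torch.zeros_like(x)
    opt_block(x, res)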
Running detectron2_maskrcnn_r_50_fpn...
[2022-10-30 18:11:50,418] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:50,418] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:51,760] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:51,761] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:52,354] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:52,355] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:53,253] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:53,253] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:54,146] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:54,146] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:54,512] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:54,512] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:54,740] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:54,741] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:55,640] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:55,640] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:56,004] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:56,004] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:56,232] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:56,232] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:57,611] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:57,611] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:57,979] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:57,979] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:11:59,608] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:11:59,608] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:00,226] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:00,226] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:00,452] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:00,453] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:00,967] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:00,968] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:01,337] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:01,337] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:01,562] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:01,563] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:02,337] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:02,337] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:02,700] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:02,700] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:02,927] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:02,927] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:04,281] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:04,281] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:04,647] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:04,647] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:06,378] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:06,378] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:06,746] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:06,746] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:06,977] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:06,977] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:07,770] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:07,770] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:08,150] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:08,150] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:08,385] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:08,385] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:08,903] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:08,903] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:09,536] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:09,536] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:09,768] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:09,768] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:10,296] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:10,297] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:10,660] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:10,660] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:10,885] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:10,885] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:11,693] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:11,693] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:12,079] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:12,080] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:12,306] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:12,306] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:13,773] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:13,773] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:14,142] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:14,142] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:15,793] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:15,793] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:16,161] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:16,161] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:16,387] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:16,387] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:17,185] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:17,185] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:17,556] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:17,556] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
[2022-10-30 18:12:17,785] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:12:17,785] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running <function meshgrid at 0x7f6a5d85bee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 489, in meshgrid
return _meshgrid(*tensors, indexing=indexing)
File "/data/users/ezyang/pytorch-tmp/torch/functional.py", line 504, in _meshgrid
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 150, in forward
return self.inference(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 203, in inference
images = self.preprocess_image(batched_inputs)
File "/data/users/ezyang/detectron2/detectron2/modeling/meta_arch/rcnn.py", line 204, in <graph break in inference>
features = self.backbone(images.tensor)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function meshgrid at 0x7f6a5d85bee0>(*(FakeTensor(FakeTensor(..., device='meta', size=(s2,)), cuda:0), FakeTensor(FakeTensor(..., device='meta', size=(s3,)), cuda:0)), **{}):
Cannot call numel() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval detectron2_maskrcnn_r_50_fpn FAIL
Running dlrm...
ERROR:common:Failed running <class 'range'>(*(9,), **{}):
'SymInt' object cannot be interpreted as an integer
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
TypeError: 'SymInt' object cannot be interpreted as an integer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 807, in CALL_FUNCTION_EX
self.call_function(fn, argsvars.items, kwargsvars.items)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 369, in call_function
return DynamicShapeVariable.create(tx, proxy, None, **options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 634, in create
dyn_shape = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <class 'range'>(*(9,), **{}):
'SymInt' object cannot be interpreted as an integer
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of the following error
cuda eval dlrm FAIL
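
Note: the dlrm failure above is Python's built-in range() being handed a traced symbolic size; range() needs an argument that resolves to a concrete integer via __index__. A minimal sketch of the same failure mode, using an illustrative stand-in class rather than the real torch.SymInt:

class FakeSymInt:
    """Stand-in for a symbolic integer with no concrete value (illustrative only)."""
    def __repr__(self):
        return "s0"

try:
    range(FakeSymInt())  # mirrors range(9) being traced as range(s0)
except TypeError as err:
    print(err)  # 'FakeSymInt' object cannot be interpreted as an integer
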
/data/users/ezyang/pytorch-tmp/torch/utils/tensorboard/__init__.py:4: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
if not hasattr(tensorboard, "__version__") or LooseVersion(
/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/gym/core.py:317: DeprecationWarning: WARN: Initializing wrapper in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.
deprecation(
Running drq...
cuda train drq FAIL (TIMEOUT)
Running fastNLP_Bert...
[2022-10-30 18:17:57,474] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embeddings': <class 'fastNLP.modules.encoder.bert.BertEmbeddings'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 230, in forward
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 512, in forward
embedding_output = self.embeddings(input_ids, token_type_ids)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/models/bert.py", line 265, in forward
sequence_output = self.bert(words)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 137, in forward
outputs = self.model(words)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 445, in forward
max_word_piece_length = batch_word_pieces_length.sum(dim=-1).max().item()  # the word piece length (including padding)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 462, in <graph break in forward>
word_indexes = words.cpu().numpy()
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 482, in <graph break in forward>
bert_outputs, pooled_cls = self.encoder(word_pieces, token_type_ids=token_type_ids, attention_mask=attn_masks,
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 480, in forward
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embeddings': <class 'fastNLP.modules.encoder.bert.BertEmbeddings'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 230, in forward
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 512, in forward
embedding_output = self.embeddings(input_ids, token_type_ids)
TorchDynamo optimized model failed to run because of the following error
cuda eval fastNLP_Bert FAIL
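
Note: the arange failures repeated throughout this sweep all have the same shape: every aten::arange overload expects a concrete Scalar end, so a symbolic size (s1) matches none of them. A minimal sketch of that schema mismatch, assuming a PyTorch build where torch.ops.aten.arange.default is callable directly, and using a string as an illustrative stand-in for the sympy Symbol s1:

import torch

print(torch.ops.aten.arange.default(8, dtype=torch.int64))  # concrete end matches the schema

try:
    torch.ops.aten.arange.default("s1", dtype=torch.int64)  # non-number end, like the symbol s1
except (RuntimeError, TypeError) as err:
    print(err)  # schema-match failure, as reported above
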
Running functorch_dp_cifar10...
cuda train functorch_dp_cifar10 FAIL (TIMEOUT)
Running functorch_maml_omniglot...
cuda eval functorch_maml_omniglot PASS
Running hf_Albert...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 990, in forward
outputs = self.albert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 990, in <graph break in forward>
outputs = self.albert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_Albert FAIL
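
Note: the hf_Albert, hf_Bert, and hf_DistilBert runs all stop at the same `assert not needed` in torch/_inductor/sizevars.py: the wrapper codegen expects every size symbol the kernels reference to be derivable from a graph input's sizes or strides. A simplified sketch of what that invariant looks like; this is an assumption about the shape of the check, not the actual sizevars implementation, and graph_input_sizes / needed are made-up names for illustration:

import sympy

s0, s1 = sympy.symbols("s0 s1")
graph_input_sizes = {"arg0_1": (s0, 768)}  # hypothetical: sizes visible on the graph inputs
needed = {s0, s1}                          # hypothetical: symbols the generated kernels reference

for name, sizes in graph_input_sizes.items():
    for dim, expr in enumerate(sizes):
        if expr in needed:
            print(f"{expr} = {name}.size({dim})")  # the definition the wrapper would emit
            needed.discard(expr)

try:
    assert not needed
except AssertionError:
    print("AssertionError: no graph input provides", needed)  # s1 is left over, as in the log
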
Running hf_Bart...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 134, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 801, in forward
embed_pos = self.embed_positions(input_shape)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1353, in forward
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1222, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 735, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 134, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 801, in forward
embed_pos = self.embed_positions(input_shape)
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_Bart FAIL
Running hf_Bert...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1351, in forward
outputs = self.bert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1351, in <graph break in forward>
outputs = self.bert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_Bert FAIL
Running hf_BigBird...
WARNING:common:fp64 golden ref was not generated for hf_BigBird
ERROR:common:(False, s0, s1)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2462, in forward
outputs = self.bert(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2104, in forward
blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2185, in create_masks_for_block_sparse_attn
@staticmethod
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 267, in output
assert all(
AssertionError: (False, s0, s1)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_BigBird FAIL
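
Note: the hf_BigBird failure is different from the arange ones: the captured graph returns plain Python values, (False, s0, s1), and Inductor's output check (the `assert all(...)` in graph.py above) only accepts tensor outputs. A minimal sketch of the kind of function that produces such outputs under dynamic shapes; mask_metadata is a hypothetical illustration, not the BigBird create_masks_for_block_sparse_attn code:

import torch

def mask_metadata(x: torch.Tensor):
    batch, seq_len = x.shape              # symbolic sizes (s0, s1) under dynamic shapes
    needs_padding = seq_len % 64 != 0     # a plain Python bool derived from a symbolic size
    return needs_padding, batch, seq_len  # graph outputs of the form (bool, s0, s1), rejected above

print(mask_metadata(torch.zeros(2, 128)))  # eager result: (False, 2, 128)
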
Running hf_DistilBert...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 649, in forward
dlbrt_output = self.distilbert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 649, in <graph break in forward>
dlbrt_output = self.distilbert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_DistilBert FAIL
Running hf_GPT2...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1048, in forward
transformer_outputs = self.transformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 738, in forward
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_GPT2 FAIL
Running hf_GPT2_large...
WARNING:common:fp64 golden ref was not generated for hf_GPT2_large
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1048, in forward
transformer_outputs = self.transformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 738, in forward
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_GPT2_large FAIL
Running hf_Longformer...
[2022-10-30 18:27:20,175] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:27:20,202] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:27:20,227] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:27:21,088] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:27:23,841] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
[2022-10-30 18:27:26,286] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function eq>
args[0]: 768
args[1]: 768
ERROR:common:AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: 768
args[1]: 768
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size_2, 768), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'bool' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1813, in forward
outputs = self.longformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1696, in forward
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1715, in <graph break in forward>
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1265, in forward
is_global_attn = is_index_global_attn.flatten().any().item()
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1297, in <graph break in forward>
layer_outputs = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1221, in forward
self_attn_outputs = self.attention(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1157, in forward
self_outputs = self.self(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 542, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: 768
args[1]: 768
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size_2, 768), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_Longformer FAIL
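A short illustration of the AttributeError above (an assumed reading of the trace, not the benchmark code): the implicit fallback runs the target eagerly and then reads .device off the example output, but operator.eq on two plain ints returns a Python bool, which has no device attribute.

import operator

example_output = operator.eq(768, 768)   # a plain Python bool, not a Tensor
print(type(example_output))              # <class 'bool'>
try:
    example_output.device                # fails exactly as in the LoweringException
except AttributeError as e:
    print(e)                             # 'bool' object has no attribute 'device'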
Running hf_Reformer...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 249, in forward
position_ids = torch.arange(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 2397, in forward
reformer_outputs = self.reformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 2063, in forward
least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 2100, in <graph break in forward>
embedding_output = self.embeddings(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 239, in forward
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 249, in forward
position_ids = torch.arange(
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_Reformer FAIL
Running hf_T5...
WARNING:common:fp64 golden ref was not generated for hf_T5
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_1,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 631, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_1,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_T5 FAIL
Running hf_T5_base...
WARNING:common:fp64 golden ref was not generated for hf_T5_base
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_1,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 631, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_1,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_T5_base FAIL
Running hf_T5_large...
WARNING:common:fp64 golden ref was not generated for hf_T5_large
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_1,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 631, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_1,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
TorchDynamo optimized model failed to run because of the following error
cuda eval hf_T5_large FAIL
Running lennard_jones...
cuda eval lennard_jones PASS
Running maml...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/maml/meta.py", line 68, in forward
return self.finetunning(x_spt[0], y_spt[0], x_qry[0], y_qry[0])
File "/data/users/ezyang/benchmark/torchbenchmark/models/maml/meta.py", line 155, in finetunning
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of the following error
cuda eval maml FAIL
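A conceptual sketch of the assertion that fails for maml (an assumed reading of the trace, not the real torch._inductor.sizevars logic): wrapper codegen tries to bind every symbolic size the generated code uses from the shapes and strides of the graph inputs, and "assert not needed" fires when some symbols are left unbound. The names used_in_kernels, bound_by_inputs and needed below are illustrative only.

import sympy

s0, s1 = sympy.symbols("s0 s1", integer=True, positive=True)
used_in_kernels = {s0, s1}    # symbols the generated code refers to
bound_by_inputs = {s0}        # symbols recoverable from graph input sizes/strides
needed = used_in_kernels - bound_by_inputs
assert not needed, f"unbound symbolic sizes: {needed}"   # raises, like the log above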
Running maml_omniglot...
cuda eval maml_omniglot PASS
Running mnasnet1_0...
cuda eval mnasnet1_0 PASS
Running mobilenet_v2...
cuda eval mobilenet_v2 PASS
WARNING:root:mobilenet_v2_quantized_qat failed to load
The eval test only supports CPU.
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1746, in main
device, name, model, example_inputs, batch_size = runner.load_model(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 255, in load_model
benchmark = benchmark_cls(
File "/data/users/ezyang/benchmark/torchbenchmark/util/model.py", line 18, in __call__
obj = type.__call__(cls, *args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/mobilenet_v2_quantized_qat/__init__.py", line 21, in __init__
raise NotImplementedError("The eval test only supports CPU.")
NotImplementedError: The eval test only supports CPU.
Running mobilenet_v3_large...
cuda eval mobilenet_v3_large PASS
devgpu001:2660179:2660179 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth0
devgpu001:2660179:2660179 [0] NCCL INFO NCCL_SOCKET_IFNAME set to eth0
devgpu001:2660179:2660179 [0] NCCL INFO Bootstrap : Using eth0:2803:6081:d0a8:baaf::1<0>
devgpu001:2660179:2660179 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
devgpu001:2660179:2660179 [0] NCCL INFO cudaDriverVersion 11040
NCCL version 2.14.3+cuda11.4
devgpu001:2660179:2661428 [0] NCCL INFO NCCL_IB_DISABLE set by environment to 1.
devgpu001:2660179:2661428 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth0
devgpu001:2660179:2661428 [0] NCCL INFO NET/Socket : Using [0]eth0:2803:6081:d0a8:baaf::1<0>
devgpu001:2660179:2661428 [0] NCCL INFO Using network Socket
devgpu001:2660179:2661428 [0] NCCL INFO NET/Socket : GPU Direct RDMA Disabled for HCA 0 'eth0'
devgpu001:2660179:2661428 [0] NCCL INFO === System : maxBw 5000.0 totalBw 0.0 ===
devgpu001:2660179:2661428 [0] NCCL INFO CPU/0 (1/1/2)
devgpu001:2660179:2661428 [0] NCCL INFO + PCI[12.0] - PCI/D000 (11f840001d9bfbe1)
devgpu001:2660179:2661428 [0] NCCL INFO + PCI[24.0] - PCI/F000 (11f840001d9bfbe0)
devgpu001:2660179:2661428 [0] NCCL INFO + PCI[24.0] - GPU/11000 (0)
devgpu001:2660179:2661428 [0] NCCL INFO + PCI[12.0] - NIC/30000
devgpu001:2660179:2661428 [0] NCCL INFO ==========================================
devgpu001:2660179:2661428 [0] NCCL INFO GPU/11000 :GPU/11000 (0/5000.000000/LOC) CPU/0 (3/12.000000/PHB)
devgpu001:2660179:2661428 [0] NCCL INFO Setting affinity for GPU 0 to ffffff,00000000,00000000,00ffffff
devgpu001:2660179:2661428 [0] NCCL INFO Pattern 4, crossNic 0, nChannels 16, bw 44.000000/44.000000, type LOC/PIX, sameChannels 1
devgpu001:2660179:2661428 [0] NCCL INFO 0 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 1 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 2 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 3 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 4 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 5 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 6 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 7 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 8 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 9 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 10 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 11 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 12 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 13 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 14 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 15 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO Pattern 3, crossNic 0, nChannels 16, bw 44.000000/44.000000, type LOC/PIX, sameChannels 1
devgpu001:2660179:2661428 [0] NCCL INFO 0 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 1 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 2 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 3 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 4 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 5 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 6 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 7 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 8 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 9 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 10 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 11 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 12 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 13 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 14 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 15 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO Pattern 3, crossNic 0, nChannels 16, bw 44.000000/44.000000, type LOC/PIX, sameChannels 1
devgpu001:2660179:2661428 [0] NCCL INFO 0 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 1 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 2 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 3 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 4 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 5 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 6 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 7 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 8 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 9 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 10 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 11 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 12 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 13 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 14 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO 15 : GPU/0
devgpu001:2660179:2661428 [0] NCCL INFO Tree 0 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 16 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 1 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 17 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 2 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 18 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 3 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 19 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 4 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 20 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 5 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 21 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 6 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 22 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 7 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 23 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 8 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 24 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 9 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 25 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 10 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 26 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 11 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 27 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 12 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 28 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 13 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 29 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 14 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 30 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 15 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Tree 31 : -1 -> 0 -> -1/-1/-1
devgpu001:2660179:2661428 [0] NCCL INFO Channel 00/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 01/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 02/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 03/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 04/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 05/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 06/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 07/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 08/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 09/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 10/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 11/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 12/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 13/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 14/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 15/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 16/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 17/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 18/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 19/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 20/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 21/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 22/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 23/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 24/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 25/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 26/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 27/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 28/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 29/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 30/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Channel 31/32 : 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 00 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 01 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 02 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 03 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 04 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 05 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 06 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 07 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 08 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 09 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 10 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 11 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 12 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 13 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 14 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 15 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 16 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 17 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 18 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 19 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 20 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 21 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 22 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 23 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 24 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 25 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 26 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 27 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 28 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 29 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 30 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Ring 31 : 0 -> 0 -> 0
devgpu001:2660179:2661428 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1 [1] -1/-1/-1->0->-1 [2] -1/-1/-1->0->-1 [3] -1/-1/-1->0->-1 [4] -1/-1/-1->0->-1 [5] -1/-1/-1->0->-1 [6] -1/-1/-1->0->-1 [7] -1/-1/-1->0->-1 [8] -1/-1/-1->0->-1 [9] -1/-1/-1->0->-1 [10] -1/-1/-1->0->-1 [11] -1/-1/-1->0->-1 [12] -1/-1/-1->0->-1 [13] -1/-1/-1->0->-1 [14] -1/-1/-1->0->-1 [15] -1/-1/-1->0->-1 [16] -1/-1/-1->0->-1 [17] -1/-1/-1->0->-1 [18] -1/-1/-1->0->-1 [19] -1/-1/-1->0->-1 [20] -1/-1/-1->0->-1 [21] -1/-1/-1->0->-1 [22] -1/-1/-1->0->-1 [23] -1/-1/-1->0->-1 [24] -1/-1/-1->0->-1 [25] -1/-1/-1->0->-1 [26] -1/-1/-1->0->-1 [27] -1/-1/-1->0->-1 [28] -1/-1/-1->0->-1 [29] -1/-1/-1->0->-1 [30] -1/-1/-1->0->-1 [31] -1/-1/-1->0->-1
devgpu001:2660179:2661428 [0] NCCL INFO Connected all rings
devgpu001:2660179:2661428 [0] NCCL INFO Connected all trees
devgpu001:2660179:2661428 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer
devgpu001:2660179:2661429 [0] NCCL INFO New proxy send connection 0 from local rank 0, transport 2
devgpu001:2660179:2661428 [0] NCCL INFO Connection to proxy localRank 0 -> connection 0x7fccac002e80
devgpu001:2660179:2661428 [0] NCCL INFO comm 0x55a3f0d02590 rank 0 nranks 1 cudaDev 0 busId 11000 - Init COMPLETE
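Note: the block above is a single-rank NCCL init (nranks 1 on busId 11000); every ring and tree degenerates to rank 0, so the dump carries no multi-GPU information. If the verbosity is unwanted in future sweeps, NCCL's standard NCCL_DEBUG variable controls it; a minimal sketch, assuming it is set before the process group is created:

    # Hedged sketch: lower NCCL's log level before torch.distributed creates any
    # communicator. WARN and VERSION are standard NCCL_DEBUG levels; INFO produced the dump above.
    import os
    os.environ.setdefault("NCCL_DEBUG", "WARN")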
Running moco...
[2022-10-30 18:32:32,536] torch._dynamo.variables.torch: [WARNING] Profiler will be ignored
ERROR:common:argument of type: <class 'range_iterator'>
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/parallel/distributed.py", line 1093, in forward
output = self._run_ddp_forward(*inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/parallel/distributed.py", line 1047, in _run_ddp_forward
return module_to_run(*inputs[0], **kwargs[0])
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 130, in forward
self._momentum_update_key_encoder() # update the key encoder
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 133, in <graph break in forward>
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 76, in _batch_shuffle_ddp
x_gather = concat_all_gather(x)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 164, in concat_all_gather
@torch.no_grad()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 326, in aot_dispatch_base
fw_module = make_fx(flat_fn, aot_config.decompositions)(*flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 671, in wrapped
t = dispatch_trace(wrap_key(func, args, fx_tracer), tracer=fx_tracer, concrete_args=tuple(phs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 422, in dispatch_trace
graph = tracer.trace(root, concrete_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/_symbolic_trace.py", line 739, in trace
(self.create_arg(fn(*args)),),
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 412, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/_symbolic_trace.py", line 344, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/proxy.py", line 140, in create_arg
return type(a)(self.create_arg(elem) for elem in a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/proxy.py", line 140, in <genexpr>
return type(a)(self.create_arg(elem) for elem in a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 412, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/_symbolic_trace.py", line 344, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/proxy.py", line 165, in create_arg
raise NotImplementedError(f"argument of type: {type(a)}")
NotImplementedError: argument of type: <class 'range_iterator'>
incomplete graph:
class <lambda>(torch.nn.Module):
    def forward(self):
        pass
TorchDynamo optimized model failed to run because of the following error
cuda eval moco FAIL
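Note on the moco failure: this is a tracing limitation rather than a lowering bug. FX's create_arg can serialize tensors, lists, dicts, slices and range objects in a graph's outputs, but not a live range iterator, hence the NotImplementedError and the empty "incomplete graph" above. A minimal sketch of the same class of failure, independent of moco (the function body is illustrative, not the benchmark's code):

    # Hedged repro sketch: returning an iterator from a traced function trips
    # Tracer.create_arg with NotImplementedError("argument of type: <class 'range_iterator'>").
    import torch
    from torch.fx.experimental.proxy_tensor import make_fx

    def f(x):
        return x + 1, iter(range(3))  # the iterator is what the tracer cannot represent

    try:
        make_fx(f)(torch.randn(2))
    except NotImplementedError as err:
        print(err)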
Running nvidia_deeprecommender...
cuda eval nvidia_deeprecommender PASS
Running opacus_cifar10...
cuda train opacus_cifar10 FAIL (TIMEOUT)
Running pyhpc_equation_of_state...
cuda eval pyhpc_equation_of_state PASS
Running pyhpc_isoneutral_mixing...
cuda eval pyhpc_isoneutral_mixing PASS
Running pyhpc_turbulent_kinetic_energy...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size,), kwargs = {device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pyhpc_turbulent_kinetic_energy/tke_pytorch.py", line 29, in solve_implicit
torch.arange(a.shape[2], device=ks.device)[None, None, :] == ks[:, :, None]
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/pyhpc_turbulent_kinetic_energy/__init__.py", line 96, in forward
return tke_pytorch.integrate_tke(
File "/data/users/ezyang/benchmark/torchbenchmark/models/pyhpc_turbulent_kinetic_energy/tke_pytorch.py", line 249, in integrate_tke
sol, water_mask = solve_implicit(ks, a_tri, b_tri, c_tri, d_tri, b_edge=b_tri_edge)
File "/data/users/ezyang/benchmark/torchbenchmark/models/pyhpc_turbulent_kinetic_energy/tke_pytorch.py", line 26, in solve_implicit
def solve_implicit(ks, a, b, c, d, b_edge):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size,), kwargs = {device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pyhpc_turbulent_kinetic_energy/tke_pytorch.py", line 29, in solve_implicit
torch.arange(a.shape[2], device=ks.device)[None, None, :] == ks[:, :, None]
TorchDynamo optimized model failed to run because of the following error
cuda eval pyhpc_turbulent_kinetic_energy FAIL
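Note on the failure above: inductor's arange fallback is receiving a symbolic end. With TORCHDYNAMO_DYNAMIC_SHAPES=1, a.shape[2] is a SymInt (s1), and no aten::arange overload accepts a Symbol where a Scalar is expected. A hedged sketch of the user-level pattern, paraphrasing tke_pytorch.solve_implicit line 29 (the wrapper name is illustrative):

    # Hedged sketch: fine in eager mode where a.shape[2] is a plain int; under dynamic
    # shapes the lowering falls back to aten.arange.default with a Symbol `end`, which
    # matches none of the schemas listed in the error above.
    import torch

    def water_mask(a, ks):
        return torch.arange(a.shape[2], device=ks.device)[None, None, :] == ks[:, :, None]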
Running pytorch_CycleGAN_and_pix2pix...
cuda eval pytorch_CycleGAN_and_pix2pix PASS
Running pytorch_stargan...
cuda eval pytorch_stargan PASS
WARNING:root:pytorch_struct failed to load
Test eval is not implemented.
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1746, in main
device, name, model, example_inputs, batch_size = runner.load_model(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 255, in load_model
benchmark = benchmark_cls(
File "/data/users/ezyang/benchmark/torchbenchmark/util/model.py", line 18, in __call__
obj = type.__call__(cls, *args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_struct/__init__.py", line 61, in __init__
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
File "/data/users/ezyang/benchmark/torchbenchmark/util/model.py", line 82, in __init__
self.determine_batch_size(batch_size)
File "/data/users/ezyang/benchmark/torchbenchmark/util/model.py", line 180, in determine_batch_size
raise NotImplementedError(f"Test {self.test} is not implemented.")
NotImplementedError: Test eval is not implemented.
Running pytorch_unet...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 80.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 80.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 80.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_30,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'mod': <class 'torchbenchmark.models.pytorch_unet.pytorch_unet.unet.unet_model.UNet'>, 'mod_up1': <class 'torchbenchmark.models.pytorch_unet.pytorch_unet.unet.unet_parts.Up'>, 'mod_up1_up': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_parts.py", line 57, in forward
x1 = self.up(x1)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_model.py", line 31, in forward
x = self.up1(x5, x4)
| File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 80.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 80.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 328, in forward_pass
def forward_pass(self, mod, inputs, collect_outputs=True):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 80.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 80.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 80.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_30,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'mod': <class 'torchbenchmark.models.pytorch_unet.pytorch_unet.unet.unet_model.UNet'>, 'mod_up1': <class 'torchbenchmark.models.pytorch_unet.pytorch_unet.unet.unet_parts.Up'>, 'mod_up1_up': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_parts.py", line 57, in forward
x1 = self.up(x1)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_model.py", line 31, in forward
x = self.up1(x5, x4)
| File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
TorchDynamo optimized model failed to run because of the following error
cuda eval pytorch_unet FAIL
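Note: pytorch_unet hits the same arange fallback, but with a SymFloat. The Upsample decomposition computes its output size as input_size * scale_factor, and under symbolic shapes that product is a sympy Float (the 80.0000000000000 above) that the Scalar binding cannot cast. With an ordinary Python number the identical call succeeds; a small hedged illustration:

    # Hedged illustration: the fallback call is fine for a concrete number; it is the
    # symbolic Float carried out of the Upsample size arithmetic that the binding rejects.
    import torch

    t = torch.ops.aten.arange.default(80.0, dtype=torch.float32, device="cpu", pin_memory=False)
    assert t.numel() == 80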
Running resnet18...
cuda eval resnet18 PASS
Running resnet50...
cuda eval resnet50 PASS
WARNING:root:resnet50_quantized_qat failed to load
The eval test only supports CPU.
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1746, in main
device, name, model, example_inputs, batch_size = runner.load_model(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 255, in load_model
benchmark = benchmark_cls(
File "/data/users/ezyang/benchmark/torchbenchmark/util/model.py", line 18, in __call__
obj = type.__call__(cls, *args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/resnet50_quantized_qat/__init__.py", line 21, in __init__
raise NotImplementedError("The eval test only supports CPU.")
NotImplementedError: The eval test only supports CPU.
Running resnext50_32x4d...
cuda eval resnext50_32x4d PASS
Running shufflenet_v2_x1_0...
cuda eval shufflenet_v2_x1_0 PASS
/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/gym/core.py:317: DeprecationWarning: WARN: Initializing wrapper in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.
deprecation(
/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/gym/wrappers/step_api_compatibility.py:39: DeprecationWarning: WARN: Initializing environment in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.
deprecation(
/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/gym/core.py:256: DeprecationWarning: WARN: Function `env.seed(seed)` is marked as deprecated and will be removed in the future. Please use `env.reset(seed=seed)` instead.
deprecation(
Running soft_actor_critic...
cuda eval soft_actor_critic PASS
Running speech_transformer...
ERROR:common:Failed running <class 'range'>(*(s0,), **{}):
'SymInt' object cannot be interpreted as an integer
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
TypeError: 'SymInt' object cannot be interpreted as an integer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 807, in CALL_FUNCTION_EX
self.call_function(fn, argsvars.items, kwargsvars.items)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 819, in CALL_FUNCTION_KW
self.call_function(fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 369, in call_function
return DynamicShapeVariable.create(tx, proxy, None, **options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 634, in create
dyn_shape = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <class 'range'>(*(s0,), **{}):
'SymInt' object cannot be interpreted as an integer
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of the following error
cuda eval speech_transformer FAIL
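Note: speech_transformer fails one step earlier, inside dynamo itself. The model builds positional indices with Python's range() over a sequence length, and with dynamic shapes that length is a SymInt (s0), which range() cannot consume. A hedged sketch of the pattern (the function is illustrative, not the benchmark's code):

    # Hedged sketch: x.size(1) is an int in eager mode; under TORCHDYNAMO_DYNAMIC_SHAPES=1 it
    # becomes a SymInt, and evaluating range(s0) raises
    # TypeError: 'SymInt' object cannot be interpreted as an integer.
    import torch

    def position_ids(x):
        return torch.tensor(list(range(x.size(1))), device=x.device)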
Running squeezenet1_1...
[2022-10-30 18:44:05,403] torch._dynamo.utils: [ERROR] RMSE (res-fp64): 0.00698, (ref-fp64): 0.00165 and shape=torch.Size([2, 1000])
cuda eval squeezenet1_1 FAIL
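Note: squeezenet1_1 is not a crash but an accuracy miss; the compiled output's RMSE against the fp64 reference (0.00698) is roughly 4x the eager baseline's (0.00165). A hedged sketch of that style of check (the helper and the factor-of-4 threshold are illustrative, not the harness's exact code):

    # Hedged sketch: flag a run when the compiled model drifts much further from the fp64
    # reference than eager does.
    import torch

    def rmse(a, b):
        return (a.double() - b.double()).pow(2).mean().sqrt()

    def accuracy_ok(compiled_out, eager_out, fp64_out, ratio=4.0):
        return bool(rmse(compiled_out, fp64_out) <= ratio * rmse(eager_out, fp64_out))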
Running tacotron2...
[2022-10-30 18:44:23,568] torch._inductor.ir: [WARNING] DeviceCopy
[2022-10-30 18:44:42,584] torch._dynamo.optimizations.training: [WARNING] Unable to use Aot Autograd because of presence of mutation
[2022-10-30 18:44:42,585] torch._inductor.compile_fx: [WARNING] Aot Autograd is not safe to run, so falling back to eager
ERROR:common:Failed running view(*(FakeTensor(FakeTensor(..., device='meta', size=(s0, s4, s1)), cuda:0), s0, s4, -1), **{}):
view(): argument 'size' must be tuple of ints, but found element of type SymFloat at pos 2
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 55, in _run_node
return getattr(args[0], node.target)(*args[1:], **kwargs)
TypeError: view(): argument 'size' must be tuple of ints, but found element of type SymFloat at pos 2
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/tacotron2/model.py", line 505, in forward
encoder_outputs = self.encoder(embedded_inputs, text_lengths)
File "/data/users/ezyang/benchmark/torchbenchmark/models/tacotron2/model.py", line 507, in <graph break in forward>
mel_outputs, gate_outputs, alignments = self.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/tacotron2/model.py", line 396, in forward
decoder_input = self.get_go_frame(memory).unsqueeze(0)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/misc.py", line 571, in call_function
return self.obj.call_method(tx, self.name, args, kwargs).add_options(self)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 612, in call_method
return self.__class__.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running view(*(FakeTensor(FakeTensor(..., device='meta', size=(s0, s4, s1)), cuda:0), s0, s4, -1), **{}):
view(): argument 'size' must be tuple of ints, but found element of type SymFloat at pos 2
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of the following error
cuda eval tacotron2 FAIL
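Note: tacotron2's failure is a view() on symbolic sizes. The decoder reshapes memory with sizes derived from symbolic dims, and one of them (s4) arrives as a SymFloat, which Tensor.view rejects. The same TypeError shows up eagerly whenever a non-integer lands in a view size; a small hedged illustration (the literal 3.0 stands in for the log's SymFloat):

    # Hedged illustration: view sizes must be ints; a float element raises the same
    # "argument 'size' must be tuple of ints" TypeError seen above.
    import torch

    x = torch.randn(2, 3, 4)
    try:
        x.view(2, 3.0, -1)
    except TypeError as err:
        print(err)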
Running timm_efficientdet...
ERROR:common:Failed running <built-in method clamp of type object at 0x7f8585a84b20>(*(s1 - s2 + 2*ceiling(s2/2) - 2,), **{'min': 0}):
clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
TypeError: clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 807, in CALL_FUNCTION_EX
self.call_function(fn, argsvars.items, kwargsvars.items)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 344, in call_function
result = handler(tx, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 398, in _call_min_max
result = variables.TorchVariable(torch.clamp).call_function(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <built-in method clamp of type object at 0x7f8585a84b20>(*(s1 - s2 + 2*ceiling(s2/2) - 2,), **{'min': 0}):
clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval timm_efficientdet FAIL
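Note: the timm_efficientdet failure comes from Dynamo's _call_min_max, which rewrites a builtin max(x, 0) into torch.clamp(x, min=0); when x is a SymInt expression rather than a Tensor, no clamp overload matches. A minimal sketch of the same op pattern, using a plain Python int as a stand-in for the traced SymInt (that substitution is an assumption for illustration):

    import torch
    # torch.clamp only matches Tensor/Number overloads for its input argument.
    torch.clamp(torch.tensor(5), min=0)   # OK: Tensor input
    try:
        torch.clamp(5, min=0)             # same "invalid combination of arguments" shape of failure
    except TypeError as e:
        print(e)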
Running timm_efficientnet...
cuda eval timm_efficientnet PASS
Running timm_regnet...
cuda eval timm_regnet PASS
Running timm_resnest...
cuda eval timm_resnest PASS
Running timm_vision_transformer...
cuda eval timm_vision_transformer PASS
Running timm_vision_transformer_large...
cuda eval timm_vision_transformer_large PASS
Running timm_vovnet...
cuda eval timm_vovnet PASS
Running tts_angular...
ERROR:common:Failed running self_lstm(*(FakeTensor(FakeTensor(..., device='meta', size=(s0, s1, 40)), cuda:0),), **{}):
Cannot call sizes() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 58, in _run_node
return nnmodule(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/rnn.py", line 776, in forward
result = _VF.lstm(input, hx, self._flat_weights, self.bias, self.num_layers,
RuntimeError: Cannot call sizes() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/tts_angular/model.py", line 59, in forward
d = self.layers(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/tts_angular/model.py", line 17, in forward
self.lstm.flatten_parameters()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 201, in call_function
return variables.TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running self_lstm(*(FakeTensor(FakeTensor(..., device='meta', size=(s0, s1, 40)), cuda:0),), **{}):
Cannot call sizes() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval tts_angular FAIL
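Note: the tts_angular model calls self.lstm.flatten_parameters() and then runs the LSTM forward through _VF.lstm, whose C++ implementation calls sizes() and therefore cannot execute on fake tensors with symbolic sizes. A minimal eager-mode sketch of the op pattern (hidden size and batch/sequence lengths here are assumptions; only the 40-wide input matches the trace):

    import torch
    import torch.nn as nn
    lstm = nn.LSTM(input_size=40, hidden_size=64, batch_first=True)
    lstm.flatten_parameters()                    # the call Dynamo was tracing
    out, (h, c) = lstm(torch.randn(8, 16, 40))   # fine in eager; breaks on fake symbolic tensors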
Running vgg16...
cuda eval vgg16 PASS
Running vision_maskrcnn...
ERROR:common:Failed running <function interpolate at 0x7f18f2f628b0>(*(FakeTensor(FakeTensor(..., device='meta', size=(1, s0, s1, s2)), cuda:0),), **{'size': None, 'scale_factor': 1.8735363483428955, 'mode': 'bilinear', 'recompute_scale_factor': True, 'align_corners': False}):
sym_int NYI
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/functional.py", line 3954, in interpolate
return torch._C._nn.upsample_bilinear2d(input, output_size, align_corners, scale_factors)
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/fake_tensor.py", line 785, in __torch_dispatch__
return decomposition_table[func](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_decomp/decompositions.py", line 68, in inner
r = f(*tree_map(increase_prec, args), **tree_map(increase_prec, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_decomp/decompositions.py", line 1901, in upsample_bilinear2d_vec
i = torch.arange(sym_int(out_h), dtype=input.dtype, device=input.device)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/symbolic_shapes.py", line 116, in sym_int
return a.__sym_int__()
File "/data/users/ezyang/pytorch-tmp/torch/__init__.py", line 251, in __sym_int__
return SymInt(self.node.sym_int())
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/symbolic_shapes.py", line 205, in sym_int
raise NotImplementedError("sym_int NYI")
NotImplementedError: sym_int NYI
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/vision/torchvision/models/detection/generalized_rcnn.py", line 83, in forward
images, targets = self.transform(images, targets)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/vision/torchvision/models/detection/transform.py", line 130, in forward
image, target_index = self.resize(image, target_index)
File "/data/users/ezyang/vision/torchvision/models/detection/transform.py", line 181, in resize
image, target = _resize_image_and_masks(image, size, float(self.max_size), target, self.fixed_size)
File "/data/users/ezyang/vision/torchvision/models/detection/transform.py", line 50, in _resize_image_and_masks
scale_factor = scale.item()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 819, in CALL_FUNCTION_KW
self.call_function(fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <function interpolate at 0x7f18f2f628b0>(*(FakeTensor(FakeTensor(..., device='meta', size=(1, s0, s1, s2)), cuda:0),), **{'size': None, 'scale_factor': 1.8735363483428955, 'mode': 'bilinear', 'recompute_scale_factor': True, 'align_corners': False}):
sym_int NYI
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval vision_maskrcnn FAIL
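Note: _resize_image_and_masks calls F.interpolate with a float scale_factor and recompute_scale_factor=True; the upsample_bilinear2d decomposition then needs sym_int() on the symbolic output height, which is not implemented yet. A minimal eager-mode sketch of the call pattern (the input shape is an assumption; the scale factor is taken from the log):

    import torch
    import torch.nn.functional as F
    x = torch.randn(1, 3, 224, 224)
    y = F.interpolate(x, size=None, scale_factor=1.8735363483428955,
                      mode="bilinear", recompute_scale_factor=True,
                      align_corners=False)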
Running yolov3...
ERROR:common:name 's2' is not defined
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 329, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/yolov3/yolo_models.py", line 238, in forward
return self.forward_once(x)
File "/data/users/ezyang/benchmark/torchbenchmark/models/yolov3/yolo_models.py", line 265, in forward_once
def forward_once(self, x, augment=False, verbose=False):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 885, in new_func
return compiled_fn(args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 340, in new_fn
fw_outs = call_func_with_args(compiled_fw, args, disable_amp=disable_amp)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 296, in call_func_with_args
out = normalize_as_list(f(args))
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 186, in run
return model(new_inputs)
File "/tmp/torchinductor_ezyang/6k/c6kv3o5udiinu7zrxyvffky3uxr4b5egh7tsxzflmee4cy6pmnvy.py", line 197, in call
return (buf11, buf5, s2, s3, )
NameError: name 's2' is not defined
TorchDynamo optimized model failed to run because of following error
cuda eval yolov3 FAIL
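Note: for yolov3 the generated Inductor wrapper returns s2 and s3 in its output tuple, but the codegen never bound s2 inside call(). A hypothetical, stripped-down reduction of that failure mode (the names and body are illustrative, not the real generated code):

    def call(args):
        buf = args[0] * 2
        return (buf, s2)   # s2 was never defined in this scope

    try:
        call([3])
    except NameError as e:
        print(e)           # name 's2' is not defined, as in the log above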
Running AlbertForMaskedLM...
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 1008, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 990, in forward
outputs = self.albert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 990, in <graph break in forward>
outputs = self.albert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 1008, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of following error
cuda eval AlbertForMaskedLM FAIL
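Note: the aten.full([], sym_numel) node comes from the cross-entropy decomposition of the masked-LM loss; its fill value is a symbolic numel, and the _full lowering asserts the value is either a number or an IR node with get_size(), so a sympy Symbol falls through. The user-level pattern is just the standard flattened cross-entropy call; a minimal eager sketch (the vocab size, batch, and sequence length are assumptions):

    import torch
    import torch.nn as nn
    loss_fct = nn.CrossEntropyLoss()
    prediction_scores = torch.randn(8, 128, 30000)   # (batch, seq, vocab)
    labels = torch.randint(0, 30000, (8, 128))
    loss = loss_fct(prediction_scores.view(-1, 30000), labels.view(-1))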
Running AlbertForQuestionAnswering...
cuda eval AlbertForQuestionAnswering PASS
Running AllenaiLongformerBase...
[2022-10-30 18:55:31,867] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:55:31,895] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:55:31,918] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:55:32,597] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 18:55:35,073] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
[2022-10-30 18:55:36,661] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function eq>
args[0]: 768
args[1]: 768
ERROR:common:AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: 768
args[1]: 768
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size_2, 768), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'bool' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1813, in forward
outputs = self.longformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1696, in forward
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1715, in <graph break in forward>
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1265, in forward
is_global_attn = is_index_global_attn.flatten().any().item()
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1297, in <graph break in forward>
layer_outputs = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1221, in forward
self_attn_outputs = self.attention(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1157, in forward
self_outputs = self.self(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 542, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: 768
args[1]: 768
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size_2, 768), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval AllenaiLongformerBase FAIL
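Note: the implicit fallback is created for operator.eq over two sizes; its example output is a plain Python bool, and FallbackKernel.create immediately reads example_output.device, which a bool does not have. A tiny sketch of why that attribute access fails:

    import operator
    result = operator.eq(768, 768)            # plain bool, not a Tensor
    print(result, hasattr(result, "device"))  # True False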
Running BartForCausalLM...
[2022-10-30 18:55:55,818] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:AttributeError: 'Integer' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: 1024
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1859, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Integer' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1839, in forward
outputs = self.model.decoder(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1839, in <graph break in forward>
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Integer' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: 1024
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1859, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of following error
cuda eval BartForCausalLM FAIL
Running BartForConditionalGeneration...
ERROR:common:TypeError: Argument of Integer should be of numeric type, got s0.
target: aten.new_zeros.default
args[0]: TensorBox(StorageBox(
InputBuffer(name='arg0_1', layout=FixedLayout('cuda', torch.int64, size=[1, s0], stride=[s0, 1]))
))
args[1]: [1, s0]
kwargs: {'dtype': torch.int64, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %new_zeros : [#users=6] = call_function[target=torch.ops.aten.new_zeros.default](args = (%arg0_1, [%sym_size, %sym_size_1]), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 79, in shift_tokens_right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1349, in forward
decoder_input_ids = shift_tokens_right(
Traceback (most recent call last):
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/sympy-1.11rc1-py3.9.egg/sympy/core/numbers.py", line 2095, in __new__
ival = int(i)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/sympy-1.11rc1-py3.9.egg/sympy/core/expr.py", line 320, in __int__
raise TypeError("Cannot convert symbols to int")
TypeError: Cannot convert symbols to int
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1548, in _new_constant
size = [sympy.Integer(s) for s in size]
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1548, in <listcomp>
size = [sympy.Integer(s) for s in size]
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/sympy-1.11rc1-py3.9.egg/sympy/core/cache.py", line 70, in wrapper
retval = cfunc(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/sympy-1.11rc1-py3.9.egg/sympy/core/numbers.py", line 2097, in __new__
raise TypeError(
TypeError: Argument of Integer should be of numeric type, got s0.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1312, in forward
@add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: TypeError: Argument of Integer should be of numeric type, got s0.
target: aten.new_zeros.default
args[0]: TensorBox(StorageBox(
InputBuffer(name='arg0_1', layout=FixedLayout('cuda', torch.int64, size=[1, s0], stride=[s0, 1]))
))
args[1]: [1, s0]
kwargs: {'dtype': torch.int64, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %new_zeros : [#users=6] = call_function[target=torch.ops.aten.new_zeros.default](args = (%arg0_1, [%sym_size, %sym_size_1]), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 79, in shift_tokens_right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1349, in forward
decoder_input_ids = shift_tokens_right(
TorchDynamo optimized model failed to run because of following error
cuda eval BartForConditionalGeneration FAIL
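Note: shift_tokens_right allocates input_ids.new_zeros(input_ids.shape); under dynamic shapes that size list contains SymInts, and the _new_constant lowering calls sympy.Integer(s) on each entry, which rejects a symbol. The eager-mode op pattern is simply the following (the concrete shape and vocab range are assumptions):

    import torch
    input_ids = torch.randint(0, 50265, (1, 7))
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)  # sizes become symbolic under dynamic shapes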
Running BertForMaskedLM...
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1371, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1351, in forward
outputs = self.bert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1351, in <graph break in forward>
outputs = self.bert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1371, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of following error
cuda eval BertForMaskedLM FAIL
Running BertForQuestionAnswering...
cuda eval BertForQuestionAnswering PASS
Running BigBird...
ERROR:common:(False, 1, s0)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2462, in forward
outputs = self.bert(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2104, in forward
blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2185, in create_masks_for_block_sparse_attn
@staticmethod
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 267, in output
assert all(
AssertionError: (False, 1, s0)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval BigBird FAIL
Running BlenderbotSmallForCausalLM...
[2022-10-30 18:58:40,220] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 1549, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 1529, in forward
outputs = self.model.decoder(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 1529, in <graph break in forward>
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 1549, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of following error
cuda eval BlenderbotSmallForCausalLM FAIL
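The failing aten.full([], s0, ...) node comes from the cross-entropy decomposition, which materializes the flattened label count as a 0-d tensor; under dynamic shapes that count is a sympy Symbol, and inductor's _full then calls .get_size() on it as if it were an IR value. A minimal sketch of the triggering pattern (vocab size, shapes, and the config flag are illustrative assumptions; assumes a CUDA device like the sweep):

import torch
import torch.nn.functional as F
import torch._dynamo as dynamo

dynamo.config.dynamic_shapes = True  # stand-in for TORCHDYNAMO_DYNAMIC_SHAPES=1

def lm_loss(logits, labels):
    # Mirrors the modeling code in the traceback: flatten, then mean-reduced
    # cross entropy, which pulls in the (symbolic) number of elements.
    return F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))

opt = dynamo.optimize("inductor")(lm_loss)
vocab = 1000  # made-up vocab size
logits = torch.randn(2, 13, vocab, device="cuda")
labels = torch.randint(0, vocab, (2, 13), device="cuda")
opt(logits, labels)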
Running BlenderbotSmallForConditionalGeneration...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 120, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 736, in forward
embed_pos = self.embed_positions(input_shape)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 1292, in forward
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 1155, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 670, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 120, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py", line 736, in forward
embed_pos = self.embed_positions(input_shape)
TorchDynamo optimized model failed to run because of following error
cuda eval BlenderbotSmallForConditionalGeneration FAIL
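Here the end of torch.arange is a symbolic sequence length; inductor's fallback_arange hands the sympy Symbol to the real aten::arange overloads, and none of their Scalar parameters accept it, hence the schema-matching failure quoted above. A minimal sketch of the positional-embedding pattern that produces such a node (shapes and the config flag are illustrative assumptions; assumes a CUDA device like the sweep):

import torch
import torch._dynamo as dynamo

dynamo.config.dynamic_shapes = True  # stand-in for TORCHDYNAMO_DYNAMIC_SHAPES=1

def position_ids(input_ids, past_length=0):
    seq_len = input_ids.shape[1]  # a SymInt under dynamic shapes
    return torch.arange(past_length, past_length + seq_len,
                        dtype=torch.long, device=input_ids.device)

opt = dynamo.optimize("inductor")(position_ids)
opt(torch.zeros(2, 19, dtype=torch.long, device="cuda"))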
Running CamemBert...
[2022-10-30 18:59:26,620] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 1114, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 1095, in forward
outputs = self.roberta(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 1095, in <graph break in forward>
outputs = self.roberta(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 1114, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of following error
cuda eval CamemBert FAIL
Running DebertaForMaskedLM...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 1041, in forward
outputs = self.deberta(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 946, in forward
embedding_output = self.embeddings(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 954, in <graph break in forward>
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 447, in forward
hidden_states = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 352, in forward
attention_output = self.attention(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 285, in forward
self_output = self.self(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 285, in <graph break in forward>
self_output = self.self(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of following error
cuda eval DebertaForMaskedLM FAIL
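The failing assert sits in the step that writes the wrapper prologue binding every size symbol the generated kernels need to a size()/stride() of some graph input; `assert not needed` means at least one symbol could not be recovered that way. A much-simplified, hypothetical illustration of that bookkeeping (not the real torch._inductor.sizevars code: it ignores strides and does no expression solving):

import sympy

def bind_size_symbols(graph_inputs, needed):
    """graph_inputs: {input_name: per-dim sizes (ints or sympy exprs)};
    needed: the sympy Symbols the generated kernels refer to."""
    needed = set(needed)
    lines = []
    for name, sizes in graph_inputs.items():
        for dim, expr in enumerate(sizes):
            if isinstance(expr, sympy.Symbol) and expr in needed:
                lines.append(f"{expr} = {name}.size({dim})")
                needed.discard(expr)
    # Mirrors the check that fails above: every needed symbol must be bound.
    assert not needed, f"could not derive {needed} from graph inputs"
    return lines

s0, s1 = sympy.symbols("s0 s1", integer=True, positive=True)
print(bind_size_symbols({"arg0_1": [s0, s1, 768]}, {s0, s1}))
# If s1 only appeared inside an expression, e.g. sizes [s0*s1, 768], this
# simplified binder could not recover it and the assert would fire.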
Running DebertaForQuestionAnswering...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 1369, in forward
outputs = self.deberta(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 946, in forward
embedding_output = self.embeddings(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 954, in <graph break in forward>
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 447, in forward
hidden_states = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 352, in forward
attention_output = self.attention(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 285, in forward
self_output = self.self(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/deberta/modeling_deberta.py", line 285, in <graph break in forward>
self_output = self.self(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of following error
cuda eval DebertaForQuestionAnswering FAIL
WARNING:__main__:Sequence Length not defined for DistilBertForMaskedLM. Choosing 128 arbitrarily
Running DistilBertForMaskedLM...
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_mlm_loss_fct': <class 'torch.nn.modules.loss.CrossEntropyLoss'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 666, in <graph break in forward>
mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 649, in forward
dlbrt_output = self.distilbert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 649, in <graph break in forward>
dlbrt_output = self.distilbert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_mlm_loss_fct': <class 'torch.nn.modules.loss.CrossEntropyLoss'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 666, in <graph break in forward>
mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
TorchDynamo optimized model failed to run because of following error
cuda eval DistilBertForMaskedLM FAIL
WARNING:__main__:Sequence Length not defined for DistilBertForQuestionAnswering. Choosing 128 arbitrarily
Running DistilBertForQuestionAnswering...
cuda eval DistilBertForQuestionAnswering PASS
Running DistillGPT2...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1048, in forward
transformer_outputs = self.transformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 738, in forward
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
TorchDynamo optimized model failed to run because of following error
cuda eval DistillGPT2 FAIL
If you want to use `ElectraForCausalLM` as a standalone, add `is_decoder=True.`
Running ElectraForCausalLM...
ERROR:common:AttributeError: 'Add' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0 - 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/electra/modeling_electra.py", line 1646, in <graph break in forward>
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Add' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/electra/modeling_electra.py", line 1621, in forward
outputs = self.electra(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/electra/modeling_electra.py", line 1621, in <graph break in forward>
outputs = self.electra(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Add' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0 - 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/electra/modeling_electra.py", line 1646, in <graph break in forward>
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of following error
cuda eval ElectraForCausalLM FAIL
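Same lowering bug as the aten.full failures above, but the fill value here is the compound expression s0 - 1 (a sympy Add) rather than a bare Symbol, because the causal-LM loss shifts the labels before flattening them. A sketch of that shift (shapes are illustrative assumptions; shown in eager mode just to make the shape arithmetic concrete):

import torch
import torch.nn.functional as F

def causal_lm_loss(prediction_scores, labels):
    # Shift so tokens < n predict token n, as in the modeling code above; the
    # flattened label count becomes a compound expression of the symbolic
    # size (s0 - 1 is consistent with a batch of one and a symbolic length).
    shifted_scores = prediction_scores[:, :-1, :].contiguous()
    shifted_labels = labels[:, 1:].contiguous()
    return F.cross_entropy(shifted_scores.view(-1, shifted_scores.size(-1)),
                           shifted_labels.view(-1))

scores = torch.randn(1, 9, 1000)
labels = torch.randint(0, 1000, (1, 9))
print(causal_lm_loss(scores, labels))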
Running ElectraForQuestionAnswering...
cuda eval ElectraForQuestionAnswering PASS
Running GPT2ForSequenceClassification...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1378, in forward
transformer_outputs = self.transformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 738, in forward
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
TorchDynamo optimized model failed to run because of the following error
cuda eval GPT2ForSequenceClassification FAIL
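Note: a minimal repro sketch for this arange failure (not taken from the sweep itself; the function body, shapes, and config call are assumptions reconstructed from the traceback above). With dynamic shapes enabled, input_ids.shape[-1] becomes a SymInt, so the `end` argument reaches the inductor fallback as the sympy symbol s0 and fails ATen schema matching.

import torch
import torch._dynamo as dynamo

dynamo.config.dynamic_shapes = True  # assumption: mirrors TORCHDYNAMO_DYNAMIC_SHAPES=1

def position_ids_like(input_ids, past_length=0):
    # Under dynamic shapes input_ids.shape[-1] is symbolic, so `end` is s0 + past_length.
    return torch.arange(past_length, input_ids.shape[-1] + past_length,
                        dtype=torch.long, device=input_ids.device)

opt_fn = dynamo.optimize("inductor")(position_ids_like)
opt_fn(torch.zeros(2, 128, dtype=torch.long, device="cuda"))  # expected to hit the LoweringException above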
Running GoogleFnet...
ERROR:common:aten.view_as_real.default - couldn't find symbolic meta function/decomposition
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/fnet/modeling_fnet.py", line 763, in forward
outputs = self.fnet(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/fnet/modeling_fnet.py", line 604, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/fnet/modeling_fnet.py", line 308, in forward
layer_outputs = layer_module(hidden_states)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/fnet/modeling_fnet.py", line 267, in forward
self_fourier_outputs = self.fourier(hidden_states)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/fnet/modeling_fnet.py", line 220, in forward
self_outputs = self.self(hidden_states)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 351, in transform
tracer = InstructionTranslator(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1418, in __init__
self.symbolic_locals = collections.OrderedDict(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1419, in <genexpr>
(k, VariableBuilder(self, LocalSource(k))(f_locals[k]))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builder.py", line 129, in __call__
return self._wrap(value).clone(**self.options())
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builder.py", line 203, in _wrap
output = [
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builder.py", line 204, in <listcomp>
VariableBuilder(self.tx, GetItemSource(self.get_source(), i))(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builder.py", line 129, in __call__
return self._wrap(value).clone(**self.options())
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builder.py", line 193, in _wrap
return self.wrap_tensor(value)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builder.py", line 539, in wrap_tensor
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 210, in create
example_value = fake_wrapper(example_value)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 725, in wrap_to_fake_tensor_and_record
return wrap_fake_exception(lambda: make_fake_tensor(e, tx.fake_mode, tx))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 725, in <lambda>
return wrap_fake_exception(lambda: make_fake_tensor(e, tx.fake_mode, tx))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 671, in make_fake_tensor
fake_tensor = fake_mode.from_tensor(
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/fake_tensor.py", line 931, in from_tensor
return self.fake_tensor_converter(self, tensor, shape_env=self.shape_env)
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/fake_tensor.py", line 262, in __call__
return self.from_real_tensor(fake_mode, t, make_constant, shape_env=shape_env)
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/fake_tensor.py", line 234, in from_real_tensor
out = self.meta_converter(
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/meta_utils.py", line 369, in __call__
r = self.meta_tensor(t, shape_env=shape_env, callback=callback)
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/meta_utils.py", line 226, in meta_tensor
base = torch.view_as_real(base)
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/fake_tensor.py", line 541, in __torch_dispatch__
return func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 257, in __call__
return self._op(*args, **kwargs or {})
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/fake_tensor.py", line 806, in __torch_dispatch__
raise RuntimeError(
RuntimeError: aten.view_as_real.default - couldn't find symbolic meta function/decomposition
TorchDynamo optimized model failed to run because of the following error
cuda eval GoogleFnet FAIL
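Note: the GoogleFnet failure is different in kind; it happens while dynamo wraps a local into a fake tensor, before any lowering. The local is a view of a complex tensor (FNet takes .real of an FFT output), and the meta converter routes that through torch.view_as_real, which has no symbolic meta registered. A hypothetical sketch of just that conversion step (class names follow the traceback; the exact FakeTensorMode constructor signature on this commit is an assumption):

import torch
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.symbolic_shapes import ShapeEnv

mode = FakeTensorMode(shape_env=ShapeEnv())  # assumption: shape_env kwarg as in this build
t = torch.fft.fftn(torch.randn(2, 8, 16)).real  # a view whose base is complex, like FNet's hidden states
fake = mode.from_tensor(t)  # expected: aten.view_as_real.default - couldn't find symbolic meta function/decomposition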
Running LayoutLMForMaskedLM...
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 956, in <graph break in forward>
masked_lm_loss = loss_fct(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 935, in forward
outputs = self.layoutlm(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 935, in <graph break in forward>
outputs = self.layoutlm(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 956, in <graph break in forward>
masked_lm_loss = loss_fct(
TorchDynamo optimized model failed to run because of the following error
cuda eval LayoutLMForMaskedLM FAIL
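Note: the LayoutLM failure comes from the loss, not the encoder. Cross-entropy over a symbolically-sized batch decomposes into aten.full.default([], sym_numel), and inductor's _full lowering assumes the fill value is an IR node with get_size(), not a sympy Symbol. A stripped-down sketch of the same pattern (shapes and vocab size are made up; behaviour assumed to match the log):

import torch
import torch._dynamo as dynamo
import torch.nn.functional as F

dynamo.config.dynamic_shapes = True  # assumption: mirrors the sweep configuration

def masked_lm_loss(prediction_scores, labels, vocab_size=30522):
    # labels.view(-1) has a symbolic numel, which ends up as the fill value of aten.full([], ...)
    return F.cross_entropy(prediction_scores.view(-1, vocab_size), labels.view(-1))

opt = dynamo.optimize("inductor")(masked_lm_loss)
opt(torch.randn(2, 16, 30522, device="cuda"),
    torch.zeros(2, 16, dtype=torch.long, device="cuda"))  # expected: 'Symbol' object has no attribute 'get_size'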
Running LayoutLMForSequenceClassification...
ERROR:common:AttributeError: 'One' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 1093, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'One' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 1057, in forward
outputs = self.layoutlm(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 1081, in <graph break in forward>
self.config.problem_type = "single_label_classification"
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 1081, in <graph break in forward>
self.config.problem_type = "single_label_classification"
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'One' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/layoutlm/modeling_layoutlm.py", line 1093, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval LayoutLMForSequenceClassification FAIL
WARNING:__main__:Sequence Length not defined for M2M100ForConditionalGeneration. Choosing 128 arbitrarily
Running M2M100ForConditionalGeneration...
[2022-10-30 19:05:37,304] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
[2022-10-30 19:05:37,309] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 1026
ERROR:common:AttributeError: 'StrictGreaterThan' object has no attribute 'device'
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 1026
While executing %gt : [#users=1] = call_function[target=operator.gt](args = (%add_3, 1026), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'StrictGreaterThan' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/m2m_100/modeling_m2m_100.py", line 1317, in forward
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/m2m_100/modeling_m2m_100.py", line 1190, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/m2m_100/modeling_m2m_100.py", line 782, in forward
embed_pos = self.embed_positions(input_ids, inputs_embeds)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/m2m_100/modeling_m2m_100.py", line 159, in forward
@torch.no_grad()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'StrictGreaterThan' object has no attribute 'device'
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 1026
While executing %gt : [#users=1] = call_function[target=operator.gt](args = (%add_3, 1026), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of the following error
cuda eval M2M100ForConditionalGeneration FAIL
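Note: for M2M100 the graph contains a plain Python comparison on symbolic sizes, and the implicit FallbackKernel evaluates operator.gt on sympy expressions, so the "example output" is a sympy relational rather than a tensor and has no .device. The sympy side of that is easy to confirm in isolation (illustration only):

import operator
import sympy

s0 = sympy.Symbol("s0")
out = operator.gt(s0 + 2, 1026)
print(type(out).__name__)      # StrictGreaterThan
print(hasattr(out, "device"))  # False - the attribute FallbackKernel.create expects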
WARNING:__main__:Sequence Length not defined for MBartForCausalLM. Choosing 128 arbitrarily
Running MBartForCausalLM...
[2022-10-30 19:05:56,280] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 1856, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 1836, in forward
outputs = self.model.decoder(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 1836, in <graph break in forward>
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 1856, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval MBartForCausalLM FAIL
WARNING:__main__:Sequence Length not defined for MBartForConditionalGeneration. Choosing 128 arbitrarily
Running MBartForConditionalGeneration...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.mbart.modeling_mbart.MBartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 140, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 796, in forward
embed_pos = self.embed_positions(input_shape)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 1346, in forward
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 1211, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 730, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.mbart.modeling_mbart.MBartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 140, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mbart/modeling_mbart.py", line 796, in forward
embed_pos = self.embed_positions(input_shape)
TorchDynamo optimized model failed to run because of the following error
cuda eval MBartForConditionalGeneration FAIL
WARNING:__main__:Sequence Length not defined for MT5ForConditionalGeneration. Choosing 128 arbitrarily
Running MT5ForConditionalGeneration...
WARNING:common:fp64 golden ref was not generated for MT5ForConditionalGeneration
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1537, in forward
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
TorchDynamo optimized model failed to run because of the following error
cuda eval MT5ForConditionalGeneration FAIL
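Note: the GPT2, MBart and MT5 arange failures all bottom out in the same final frame, where the inductor fallback re-invokes the real ATen overload with a raw sympy symbol as the Scalar `end`. That last step can be reproduced directly (a hedged sketch; the argument counts listed in the message will differ slightly from the log because fewer kwargs are passed here):

import sympy
import torch

torch.ops.aten.arange.default(sympy.Symbol("s0"), dtype=torch.int64)
# expected: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema
#           ... Cast error details: Cannot cast s0 to number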
If you want to use `MegatronBertForCausalLM` as a standalone, add `is_decoder=True`.
WARNING:__main__:Sequence Length not defined for MegatronBertForCausalLM. Choosing 128 arbitrarily
Running MegatronBertForCausalLM...
ERROR:common:AttributeError: 'Add' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0 - 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py", line 1226, in <graph break in forward>
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Add' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py", line 1201, in forward
outputs = self.bert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py", line 1201, in <graph break in forward>
outputs = self.bert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Add' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0 - 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py", line 1226, in <graph break in forward>
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval MegatronBertForCausalLM FAIL
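
The failure above (and the identical ones for MobileBertForMaskedLM, PLBartForCausalLM, PegasusForCausalLM, PegasusForConditionalGeneration and RobertaForCausalLM further down) follows one pattern: under dynamic shapes, the cross-entropy loss over labels.view(-1) materializes the symbolic element count (here s0 - 1, the %sym_numel node) as the fill value of aten.full([], ...), and the _full lowering only knows how to handle a plain number or an IR node with get_size(). A minimal sketch of the three-way split the traceback implies, purely illustrative and not the actual inductor code:

    import sympy

    def classify_fill_value(value):
        # Illustrative sketch only: split the fill value handed to an
        # aten.full lowering into the three cases visible in the log --
        # plain Python number, symbolic scalar (a sympy.Expr such as
        # s0 - 1), or an IR node wrapping a 0-d tensor.
        if isinstance(value, (bool, int, float)):
            return "constant", value
        if isinstance(value, sympy.Expr):
            # Symbolic scalar: keep it symbolic so codegen can take it as a
            # size-variable argument instead of calling value.get_size() on
            # it (the AttributeError above).
            return "symbolic", value
        assert len(value.get_size()) == 0  # the 0-d tensor case from _full
        return "tensor", value

For example, classify_fill_value(sympy.Symbol("s0") - 1) yields ("symbolic", s0 - 1) exactly where the current lowering raises.
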
WARNING:__main__:Sequence Length not defined for MegatronBertForQuestionAnswering. Choosing 128 arbitrarily
Running MegatronBertForQuestionAnswering...
cuda eval MegatronBertForQuestionAnswering PASS
WARNING:__main__:Sequence Length not defined for MobileBertForMaskedLM. Choosing 128 arbitrarily
Running MobileBertForMaskedLM...
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mobilebert/modeling_mobilebert.py", line 1107, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mobilebert/modeling_mobilebert.py", line 1089, in forward
outputs = self.mobilebert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mobilebert/modeling_mobilebert.py", line 1089, in <graph break in forward>
outputs = self.mobilebert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mobilebert/modeling_mobilebert.py", line 1107, in <graph break in forward>
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval MobileBertForMaskedLM FAIL
WARNING:__main__:Sequence Length not defined for MobileBertForQuestionAnswering. Choosing 128 arbitrarily
Running MobileBertForQuestionAnswering...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mobilebert/modeling_mobilebert.py", line 1395, in forward
outputs = self.mobilebert(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/mobilebert/modeling_mobilebert.py", line 1395, in <graph break in forward>
outputs = self.mobilebert(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of the following error
cuda eval MobileBertForQuestionAnswering FAIL
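
MobileBertForQuestionAnswering gets past lowering and instead fails in wrapper codegen: sizevars.codegen asserts `not needed`, which presumably means some size symbol required by the generated code could not be bound to any graph input's size or stride. A rough sketch of the bookkeeping such an assert guards, with hypothetical names (graph_inputs as a name-to-symbolic-shape mapping):

    import sympy

    def codegen_size_bindings(graph_inputs, needed):
        # Rough sketch of the idea: emit one binding per needed size symbol,
        # e.g. "s0 = arg0.size(0)", by scanning the symbolic shapes of the
        # graph inputs.  The final assert mirrors the one in the traceback:
        # it fires when a needed symbol has no input to read it from.
        lines, bound = [], set()
        for name, shape in graph_inputs.items():
            for dim, expr in enumerate(shape):
                if isinstance(expr, sympy.Symbol) and expr in needed and expr not in bound:
                    lines.append(f"{expr} = {name}.size({dim})")
                    bound.add(expr)
        assert not (set(needed) - bound)
        return lines

codegen_size_bindings({"arg0": [sympy.Symbol("s0"), 128]}, {sympy.Symbol("s0")}) returns ["s0 = arg0.size(0)"]; drop arg0 from the mapping and the same bare AssertionError as above appears.
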
WARNING:__main__:Sequence Length not defined for OPTForCausalLM. Choosing 128 arbitrarily
Running OPTForCausalLM...
[2022-10-30 19:12:58,090] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
ERROR:common:name 's0' is not defined
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/opt/modeling_opt.py", line 918, in forward
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/opt/modeling_opt.py", line 534, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 885, in new_func
return compiled_fn(args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 340, in new_fn
fw_outs = call_func_with_args(compiled_fw, args, disable_amp=disable_amp)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 296, in call_func_with_args
out = normalize_as_list(f(args))
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 186, in run
return model(new_inputs)
File "/tmp/torchinductor_ezyang/fy/cfy36ou2zyogaogviyu2uv2l5orhn2d34hybdcvheitytp3fi5xf.py", line 127, in call
return (buf3, 1, s0, buf0, buf4, )
NameError: name 's0' is not defined
TorchDynamo optimized model failed to run because of the following error
cuda eval OPTForCausalLM FAIL
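
For OPTForCausalLM compilation itself succeeds, but the generated wrapper returns the size symbol s0 (see the cited line `return (buf3, 1, s0, buf0, buf4, )`) without ever defining it, so the NameError only appears at run time. A toy analog of the broken and the repaired wrapper shapes, hypothetical code rather than real inductor output:

    import torch

    def call_broken(args):
        buf0, = args
        # Returns the size symbol without binding it -- the NameError above.
        return (buf0, 1, s0)

    def call_fixed(args):
        buf0, = args
        s0 = buf0.size(0)          # bind the size symbol from an input first
        return (buf0, 1, s0)

    print(call_fixed([torch.zeros(4, 3)]))       # (tensor(...), 1, 4)
    # call_broken([torch.zeros(4, 3)]) raises NameError: name 's0' is not defined
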
WARNING:__main__:Sequence Length not defined for PLBartForCausalLM. Choosing 128 arbitrarily
Running PLBartForCausalLM...
[2022-10-30 19:13:15,152] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 1700, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 1680, in forward
outputs = self.model.decoder(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 1680, in <graph break in forward>
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 1700, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval PLBartForCausalLM FAIL
WARNING:__main__:Sequence Length not defined for PLBartForConditionalGeneration. Choosing 128 arbitrarily
Running PLBartForConditionalGeneration...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.plbart.modeling_plbart.PLBartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 137, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 772, in forward
embed_pos = self.embed_positions(input_shape)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 1314, in forward
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 1182, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 706, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.plbart.modeling_plbart.PLBartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 137, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/plbart/modeling_plbart.py", line 772, in forward
embed_pos = self.embed_positions(input_shape)
TorchDynamo optimized model failed to run because of the following error
cuda eval PLBartForConditionalGeneration FAIL
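
PLBartForConditionalGeneration (and T5ForConditionalGeneration at the end of this section) dies one step earlier, at aten.arange: the lowering falls back to the real ATen op via FallbackKernel, but the end argument is the symbol s0 and every C++ schema wants a concrete Scalar ("Cannot cast s0 to number"). With a symbolic end there is nothing concrete to fall back to; the range has to stay symbolic, for instance as a symbolic length plus an element-at-index formula. A purely illustrative sketch of that idea:

    import sympy

    def symbolic_arange(end, start=0, step=1):
        # Illustrative sketch: rather than handing a sympy Symbol to
        # torch.ops.aten.arange (the schema mismatch above), describe the
        # range as (symbolic length, formula for element i).
        length = (end - start + step - 1) // step
        return length, (lambda i: start + i * step)

    s0 = sympy.Symbol("s0", integer=True, positive=True)
    length, at = symbolic_arange(s0)
    print(length)    # s0
    print(at(5))     # 5
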
WARNING:__main__:Sequence Length not defined for PegasusForCausalLM. Choosing 128 arbitrarily
Running PegasusForCausalLM...
[2022-10-30 19:14:04,862] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1679, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1659, in forward
outputs = self.model.decoder(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1659, in <graph break in forward>
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1679, in <graph break in forward>
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval PegasusForCausalLM FAIL
WARNING:__main__:Sequence Length not defined for PegasusForConditionalGeneration. Choosing 128 arbitrarily
Running PegasusForConditionalGeneration...
[2022-10-30 19:15:20,913] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1421, in <graph break in forward>
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Symbol' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1399, in forward
outputs = self.model(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1399, in <graph break in forward>
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Symbol' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/pegasus/modeling_pegasus.py", line 1421, in <graph break in forward>
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval PegasusForConditionalGeneration FAIL
If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`
Running RobertaForCausalLM...
[2022-10-30 19:16:09,678] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
ERROR:common:AttributeError: 'Add' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0 - 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 996, in <graph break in forward>
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1600, in full
return tensor_constructor(fill_value)(size, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1493, in inner
return _full(fill_value, device, dtype, size)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1454, in _full
assert len(value.get_size()) == 0
AttributeError: 'Add' object has no attribute 'get_size'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 971, in forward
outputs = self.roberta(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 971, in <graph break in forward>
outputs = self.roberta(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Add' object has no attribute 'get_size'
target: aten.full.default
args[0]: []
args[1]: s0 - 1
kwargs: {'dtype': torch.float32, 'layout': torch.strided, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %full : [#users=1] = call_function[target=torch.ops.aten.full.default](args = ([], %sym_numel), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/roberta/modeling_roberta.py", line 996, in <graph break in forward>
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
TorchDynamo optimized model failed to run because of the following error
cuda eval RobertaForCausalLM FAIL
Running RobertaForQuestionAnswering...
[2022-10-30 19:16:40,698] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
[2022-10-30 19:16:55,508] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
cuda eval RobertaForQuestionAnswering PASS
WARNING:__main__:Sequence Length not defined for Speech2Text2ForCausalLM. Choosing 128 arbitrarily
Running Speech2Text2ForCausalLM...
[2022-10-30 19:17:15,958] torch._inductor.ir: [WARNING] DeviceCopy
[2022-10-30 19:17:16,301] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
[2022-10-30 19:17:16,305] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 1026
ERROR:common:AttributeError: 'StrictGreaterThan' object has no attribute 'device'
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 1026
While executing %gt : [#users=1] = call_function[target=operator.gt](args = (%add_2, 1026), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'StrictGreaterThan' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py", line 910, in forward
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py", line 613, in forward
attention_mask = self._prepare_decoder_attention_mask(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py", line 623, in <graph break in forward>
positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py", line 116, in forward
@torch.no_grad()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'StrictGreaterThan' object has no attribute 'device'
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 1026
While executing %gt : [#users=1] = call_function[target=operator.gt](args = (%add_2, 1026), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of the following error
cuda eval Speech2Text2ForCausalLM FAIL
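
Speech2Text2ForCausalLM hits a different corner: operator.gt over symbolic ints (s0 + 2 > 1026) gets an implicit FallbackKernel, and FallbackKernel.create reads .device off the example output, which here is a SymPy relational rather than a tensor. The object really has no device, as this small check (plain sympy, no torch involved) shows:

    import operator
    import sympy

    s0 = sympy.Symbol("s0", integer=True, positive=True)
    cond = operator.gt(s0 + 2, 1026)
    print(type(cond).__name__)      # StrictGreaterThan
    print(hasattr(cond, "device"))  # False, hence the AttributeError above
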
Running T5ForConditionalGeneration...
WARNING:common:fp64 golden ref were not generated for T5ForConditionalGeneration
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1537, in forward
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
TorchDynamo optimized model failed to run because of following error
cuda eval T5ForConditionalGeneration FAIL
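Note on the arange failure above (the same pattern repeats for T5Small and TrOCRForCausalLM below): the lowering falls back to calling aten.arange eagerly, but the end argument it passes is the SymPy symbol s0, and every aten::arange overload declares end as a Scalar, so schema matching fails. A short sketch that should reproduce the same error outside the compiler, assuming torch and sympy are installed; it mirrors the fallback call, with cpu substituted for cuda:

import sympy
import torch

s0 = sympy.Symbol("s0", positive=True, integer=True)
torch.ops.aten.arange.default(
    s0, dtype=torch.int64, device="cpu", pin_memory=False
)
# RuntimeError: Overloaded torch operator invoked from Python failed to match any schema: ...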
Running T5Small...
WARNING:common:fp64 golden ref were not generated for T5Small
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1537, in forward
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
TorchDynamo optimized model failed to run because of following error
cuda eval T5Small FAIL
WARNING:__main__:Sequence Length not defined for TrOCRForCausalLM. Choosing 128 arbitrarily
Running TrOCRForCausalLM...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.trocr.modeling_trocr.TrOCRLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/trocr/modeling_trocr.py", line 93, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/trocr/modeling_trocr.py", line 643, in forward
embed_pos = self.embed_positions(input_shape, past_key_values_length=past_key_values_length)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/trocr/modeling_trocr.py", line 953, in forward
outputs = self.model.decoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/trocr/modeling_trocr.py", line 538, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s0
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s0 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s0
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.trocr.modeling_trocr.TrOCRLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/trocr/modeling_trocr.py", line 93, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/trocr/modeling_trocr.py", line 643, in forward
embed_pos = self.embed_positions(input_shape, past_key_values_length=past_key_values_length)
TorchDynamo optimized model failed to run because of following error
cuda eval TrOCRForCausalLM FAIL
WARNING:__main__:Sequence Length not defined for XGLMForCausalLM. Choosing 128 arbitrarily
Running XGLMForCausalLM...
[2022-10-30 19:18:45,690] torch._inductor.ir: [WARNING] DeviceCopy
[2022-10-30 19:18:46,050] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
[2022-10-30 19:18:46,055] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 2050
ERROR:common:AttributeError: 'StrictGreaterThan' object has no attribute 'device'
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 2050
While executing %gt : [#users=1] = call_function[target=operator.gt](args = (%add_3, 2050), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'StrictGreaterThan' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/xglm/modeling_xglm.py", line 889, in forward
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/xglm/modeling_xglm.py", line 701, in forward
attention_mask = self._prepare_decoder_attention_mask(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/xglm/modeling_xglm.py", line 711, in <graph break in forward>
positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/xglm/modeling_xglm.py", line 201, in forward
@torch.no_grad()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'StrictGreaterThan' object has no attribute 'device'
target: <built-in function gt>
args[0]: s0 + 2
args[1]: 2050
While executing %gt : [#users=1] = call_function[target=operator.gt](args = (%add_3, 2050), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval XGLMForCausalLM FAIL
Running XLNetLMHeadModel...
[2022-10-30 19:19:56,810] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function neg>
args[0]: s0
ERROR:common:AttributeError: 'Mul' object has no attribute 'device'
target: <built-in function neg>
args[0]: s0
While executing %neg : [#users=1] = call_function[target=operator.neg](args = (%sym_size,), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'Mul' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 434, in forward_pass
def forward_pass(self, mod, inputs, collect_outputs=True):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Mul' object has no attribute 'device'
target: <built-in function neg>
args[0]: s0
While executing %neg : [#users=1] = call_function[target=operator.neg](args = (%sym_size,), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval XLNetLMHeadModel FAIL
Running YituTechConvBert...
[2022-10-30 19:20:35,759] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function sub>
args[0]: 520
args[1]: 8
ERROR:common:AttributeError: 'Integer' object has no attribute 'device'
target: <built-in function sub>
args[0]: 520
args[1]: 8
While executing %sub_1 : [#users=1] = call_function[target=operator.sub](args = (%add_1, 8), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'Integer' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/huggingface.py", line 435, in forward_pass
return mod(**inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/convbert/modeling_convbert.py", line 928, in forward
generator_hidden_states = self.convbert(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/convbert/modeling_convbert.py", line 853, in forward
hidden_states = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/convbert/modeling_convbert.py", line 616, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Integer' object has no attribute 'device'
target: <built-in function sub>
args[0]: 520
args[1]: 8
While executing %sub_1 : [#users=1] = call_function[target=operator.sub](args = (%add_1, 8), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval YituTechConvBert FAIL
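The XLNetLMHeadModel and YituTechConvBert failures above are the same class as the operator.gt case earlier: plain Python operators applied to SymPy values return SymPy objects (a Mul for negation, an Integer for the subtraction), and FallbackKernel.create then fails reading .device because the example output is not a Tensor. A sketch of just the types involved, assuming sympy is installed; this is not the inductor code path:

import operator
import sympy

s0 = sympy.Symbol("s0", positive=True, integer=True)
print(type(operator.neg(s0)))         # sympy.core.mul.Mul        (-s0 is -1*s0)
print(type(sympy.Integer(520) - 8))   # sympy.core.numbers.Integer (512)
# Neither object defines a device attribute, matching the AttributeErrors above.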
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running adv_inception_v3...
cuda eval adv_inception_v3 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running beit_base_patch16_224...
cuda eval beit_base_patch16_224 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running botnet26t_256...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1559, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1551, in forward_features
x = self.stages(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1245, in forward
x = self.self_attn(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/layers/bottleneck_attn.py", line 152, in forward
attn = (q @ k) * self.scale + self.pos_embed(q)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/layers/bottleneck_attn.py", line 68, in forward
def forward(self, q):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of following error
cuda eval botnet26t_256 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running cait_m36_384...
[2022-10-30 19:28:20,398] torch._dynamo.utils: [ERROR] RMSE (res-fp64): 1.16588, (ref-fp64): 0.00034 and shape=torch.Size([2, 1000])
cuda eval cait_m36_384 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running coat_lite_mini...
ERROR:common:name 's1' is not defined
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/coat.py", line 607, in forward
x_feat = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/coat.py", line 516, in forward_features
x1 = blk(x1, size=(H1, W1))
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/coat.py", line 221, in forward
cur = self.factoratt_crpe(cur, size)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/coat.py", line 144, in forward
def forward(self, x, size: Tuple[int, int]):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 885, in new_func
return compiled_fn(args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 340, in new_fn
fw_outs = call_func_with_args(compiled_fw, args, disable_amp=disable_amp)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 296, in call_func_with_args
out = normalize_as_list(f(args))
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 186, in run
return model(new_inputs)
File "/tmp/torchinductor_ezyang/jt/cjt6afonrjeybr4yb6chxq3hh2w2t2hyiuhraieqdjrx2df2labv.py", line 277, in call
return (as_strided(buf0, (s0, 8, 3137, 8), (602304, 8, 192, 1)), as_strided(buf0, (s0, 8, 3137, 8), (602304, 8, 192, 1), 128), s0, s1, 64, as_strided(buf9, (s0, 8, 3137, 8), (200768, 25096, 8, 1)), )
NameError: name 's1' is not defined
TorchDynamo optimized model failed to run because of following error
cuda eval coat_lite_mini FAIL
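Note on the coat_lite_mini failure: the generated wrapper's call() returns a tuple that references the size variable s1, but only s0 was bound in the wrapper's prologue, so the first execution raises NameError. A toy illustration of that failure mode only (purely hypothetical code, not the generated module):

def call(args):
    s0 = len(args)            # hypothetical prologue: only s0 is defined
    return (args[0], s0, s1)  # NameError: name 's1' is not defined

call([object()])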
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running convit_base...
WARNING:common:fp64 golden ref were not generated for convit_base
ERROR:common:(True, s0, s1, s2)
While executing return (True, sym_size, sym_size_1, sym_size_2)
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/convit.py", line 333, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/convit.py", line 323, in forward_features
x = blk(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/convit.py", line 214, in forward
x = x + self.drop_path(self.attn(self.norm1(x)))
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/convit.py", line 83, in forward
def forward(self, x):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 267, in output
assert all(
AssertionError: (True, s0, s1, s2)
While executing return (True, sym_size, sym_size_1, sym_size_2)
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval convit_base FAIL
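Note on the failure above (and the similar crossvit_9_240 and cspdarknet53 failures below): the inductor output assertion rejects a captured graph whose return values are a bool plus SymInts rather than tensors, i.e. `return (True, sym_size, sym_size_1, sym_size_2)`. The following is a minimal, hypothetical sketch of the kind of code that yields such a graph under TORCHDYNAMO_DYNAMIC_SHAPES=1, returning raw shape values from a compiled region; it is not taken from convit, and whether this toy function is compiled into exactly the same degenerate graph depends on dynamo's frame heuristics.

import torch
import torch._dynamo

# Hypothetical sketch: with dynamic shapes enabled, b/n/c are SymInts,
# so the captured FX graph's outputs are (bool, SymInt, SymInt, SymInt),
# the pattern the output assertion in torch/_inductor/graph.py rejects
# at this commit.
@torch._dynamo.optimize("inductor")
def shape_probe(x):
    b, n, c = x.shape
    return (n == c), b, n, c

shape_probe(torch.randn(2, 8, 8, device="cuda"))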
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running convmixer_768_32...
cuda eval convmixer_768_32 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running convnext_base...
cuda train convnext_base FAIL (TIMEOUT)
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running crossvit_9_240...
ERROR:common:(False, 240, 240)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/crossvit.py", line 418, in forward
xs = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/crossvit.py", line 394, in forward_features
x_ = scale_image(x_, ss, self.crop_scale)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/crossvit.py", line 264, in scale_image
@register_notrace_function
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 267, in output
assert all(
AssertionError: (False, 240, 240)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval crossvit_9_240 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running cspdarknet53...
ERROR:common:(TensorBox(StorageBox(
ComputedBuffer(name='buf5', layout=FlexibleLayout('cuda', torch.float32, size=[s0, 128, 128, 128], stride=[2097152, 16384, 128, 1]), data=Pointwise(
'cuda',
torch.float32,
where(load(buf4, i3 + 128 * i2 + 16384 * i1 + 2097152 * i0) > constant(0, torch.float32), load(buf4, i3 + 128 * i2 + 16384 * i1 + 2097152 * i0), load(buf4, i3 + 128 * i2 + 16384 * i1 + 2097152 * i0) * constant(0.01, torch.float32)),
ranges=[s0, 128, 128, 128],
origins={where_1}
))
)), 64)
While executing return (add_3, 64)
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/cspnet.py", line 422, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/cspnet.py", line 415, in forward_features
x = self.stages(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/cspnet.py", line 271, in forward
def forward(self, x):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 267, in output
assert all(
AssertionError: (TensorBox(StorageBox(
ComputedBuffer(name='buf5', layout=FlexibleLayout('cuda', torch.float32, size=[s0, 128, 128, 128], stride=[2097152, 16384, 128, 1]), data=Pointwise(
'cuda',
torch.float32,
where(load(buf4, i3 + 128 * i2 + 16384 * i1 + 2097152 * i0) > constant(0, torch.float32), load(buf4, i3 + 128 * i2 + 16384 * i1 + 2097152 * i0), load(buf4, i3 + 128 * i2 + 16384 * i1 + 2097152 * i0) * constant(0.01, torch.float32)),
ranges=[s0, 128, 128, 128],
origins={where_1}
))
)), 64)
While executing return (add_3, 64)
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval cspdarknet53 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running deit_base_distilled_patch16_224...
cuda eval deit_base_distilled_patch16_224 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running dla102...
cuda eval dla102 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running dm_nfnet_f0...
ERROR:common:Failed running <built-in method clamp of type object at 0x7f83f102ab20>(*(-s2 + 2*ceiling(s2/2) + 1,), **{'min': 0}):
clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
TypeError: clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 807, in CALL_FUNCTION_EX
self.call_function(fn, argsvars.items, kwargsvars.items)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 183, in call_function
tx.call_function(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 344, in call_function
result = handler(tx, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 398, in _call_min_max
result = variables.TorchVariable(torch.clamp).call_function(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <built-in method clamp of type object at 0x7f83f102ab20>(*(-s2 + 2*ceiling(s2/2) + 1,), **{'min': 0}):
clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval dm_nfnet_f0 FAIL
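Note on the failure above: dynamo's builtin min/max handling (_call_min_max in the trace) rewrites Python's max(<symbolic expression>, 0) into torch.clamp(<SymInt>, min=0), and clamp has no overload whose first argument is a SymInt. The expression in the error, -s2 + 2*ceiling(s2/2) + 1, is "same"-padding arithmetic; a hedged, illustrative sketch of that Python-level pattern follows (the function name is made up for illustration, it is not timm's).

import math

# Illustrative only: with kernel_size=3, stride=2, dilation=1 this is
# (ceil(x/2) - 1)*2 + 3 - x = -x + 2*ceil(x/2) + 1, the expression in the
# error above. When x is a SymInt, the outer max(..., 0) is what dynamo
# turns into torch.clamp(..., min=0).
def same_pad_amount(x, kernel_size=3, stride=2, dilation=1):
    return max((math.ceil(x / stride) - 1) * stride
               + (kernel_size - 1) * dilation + 1 - x, 0)

print(same_pad_amount(224))  # 1 extra pixel of padding for an even size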
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running dpn107...
cuda eval dpn107 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running eca_botnext26ts_256...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1559, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1551, in forward_features
x = self.stages(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1245, in forward
x = self.self_attn(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/layers/bottleneck_attn.py", line 152, in forward
attn = (q @ k) * self.scale + self.pos_embed(q)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/layers/bottleneck_attn.py", line 68, in forward
def forward(self, q):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of following error
cuda eval eca_botnext26ts_256 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running eca_halonext26ts...
[2022-10-30 19:46:58,380] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function sub>
args[0]: 20
args[1]: 12
ERROR:common:AttributeError: 'Integer' object has no attribute 'device'
target: <built-in function sub>
args[0]: 20
args[1]: 12
While executing %sub : [#users=1] = call_function[target=operator.sub](args = (%sym_size_3, 12), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'Integer' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1559, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1551, in forward_features
x = self.stages(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1245, in forward
x = self.self_attn(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/layers/halo_attn.py", line 169, in forward
def forward(self, x):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Integer' object has no attribute 'device'
target: <built-in function sub>
args[0]: 20
args[1]: 12
While executing %sub : [#users=1] = call_function[target=operator.sub](args = (%sym_size_3, 12), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval eca_halonext26ts FAIL
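Note on the failure above: inductor creates an "implicit fallback" for plain integer arithmetic in the graph, here operator.sub applied to a size node (sym_size_3, concretely 20) and the constant 12, and FallbackKernel.create then assumes the example output is a tensor with a .device, which an int does not have. A rough, hypothetical sketch of the kind of size arithmetic that can end up as such a node is below; it is not taken from halo_attn, and whether this exact toy function hits the same path depends on the build.

import torch
import torch._dynamo

# Hypothetical sketch: with dynamic shapes, w is a SymInt, so `w - border`
# can appear in the FX graph as call_function[operator.sub](sym_size, 12),
# for which inductor at this commit has no lowering and falls back to a
# kernel wrapper that expects a tensor result.
@torch._dynamo.optimize("inductor")
def drop_border(x, border=12):
    w = x.shape[-1]
    return x.narrow(-1, 0, w - border)

drop_border(torch.randn(2, 3, 20, 20, device="cuda"))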
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running ese_vovnet19b_dw...
cuda eval ese_vovnet19b_dw PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running fbnetc_100...
cuda eval fbnetc_100 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running fbnetv3_b...
cuda eval fbnetv3_b PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running gernet_l...
cuda eval gernet_l PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running ghostnet_100...
[2022-10-30 19:51:57,457] torch._dynamo.utils: [ERROR] RMSE (res-fp64): 7.43847, (ref-fp64): 0.00107 and shape=torch.Size([2, 1000])
cuda eval ghostnet_100 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running gluon_inception_v3...
cuda eval gluon_inception_v3 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running gluon_xception65...
cuda eval gluon_xception65 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running gmixer_24_224...
cuda eval gmixer_24_224 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running gmlp_s16_224...
cuda eval gmlp_s16_224 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running hrnet_w18...
ERROR:common:maximum recursion depth exceeded during compilation
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 414, in _compile
check_fn = CheckFunctionManager(output, output.guards, locals, globals)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/guards.py", line 592, in __init__
self.check_fn = self.compile_check_fn(local_builder, global_builder)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/guards.py", line 753, in compile_check_fn
exec(py_code, global_builder.scope, out)
RecursionError: maximum recursion depth exceeded during compilation
TorchDynamo optimized model failed to run because of following error
cuda eval hrnet_w18 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running inception_v3...
cuda eval inception_v3 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running jx_nest_base...
ERROR:common:module 'operator' has no attribute 'sym_sqrt'
While executing %sym_sqrt : [#users=1] = call_function[target=torch.fx.experimental.symbolic_shapes.sym_sqrt](args = (%getitem_1,), kwargs = {})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/nest.py", line 166, in deblockify
grid_size = int(math.sqrt(T))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/nest.py", line 372, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/nest.py", line 360, in forward_features
x = self.levels(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/nest.py", line 213, in forward
x = deblockify(x, self.block_size) # (B, H', W', C')
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/nest.py", line 158, in deblockify
@register_notrace_function # reason: int receives Proxy
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 326, in aot_dispatch_base
fw_module = make_fx(flat_fn, aot_config.decompositions)(*flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 671, in wrapped
t = dispatch_trace(wrap_key(func, args, fx_tracer), tracer=fx_tracer, concrete_args=tuple(phs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 422, in dispatch_trace
graph = tracer.trace(root, concrete_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/_symbolic_trace.py", line 739, in trace
(self.create_arg(fn(*args)),),
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 439, in wrapped
out = f(*tensors)
File "<string>", line 1, in <lambda>
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 841, in functional_call
out = Interpreter(mod).run(*args[params_len:], **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 243, in call_function
return target(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/symbolic_shapes.py", line 109, in sym_sqrt
return a.__sym_sqrt__()
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/symbolic_shapes.py", line 372, in unary_magic_impl
return wrap_node(getattr(self.node, method)())
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/symbolic_shapes.py", line 342, in unary_magic_impl
op = getattr(operator, method)
AttributeError: module 'operator' has no attribute 'sym_sqrt'
While executing %sym_sqrt : [#users=1] = call_function[target=torch.fx.experimental.symbolic_shapes.sym_sqrt](args = (%getitem_1,), kwargs = {})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/nest.py", line 166, in deblockify
grid_size = int(math.sqrt(T))
incomplete graph:
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: f32[s0, s2, s4, s5], [s2*s4*s5, s4*s5, s5, 1]):
pass
TorchDynamo optimized model failed to run because of following error
cuda eval jx_nest_base FAIL
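Note on the failure above: timm's deblockify computes int(math.sqrt(T)) on a now-symbolic sequence length, the trace records that as sym_sqrt, and replaying the node at this commit looks up getattr(operator, 'sym_sqrt'), which does not exist. A simplified stand-in for the same pattern follows (not a copy of deblockify; it assumes T is a perfect square).

import math
import torch
import torch._dynamo

# Simplified stand-in for nest.py's deblockify: under dynamic shapes t is
# a SymInt, and int(math.sqrt(t)) is traced through sym_sqrt, the op named
# in the error above.
@torch._dynamo.optimize("inductor")
def blocks_to_grid(x):            # x: (B, T, N, C), T a perfect square
    b, t, n, c = x.shape
    g = int(math.sqrt(t))
    return x.reshape(b, g, g, n, c)

blocks_to_grid(torch.randn(2, 16, 4, 8, device="cuda"))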
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running lcnet_050...
cuda eval lcnet_050 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running levit_128...
ERROR:common:Failed running reshape_as(*(FakeTensor(FakeTensor(..., device='meta',
size=(s0*(-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2**2 + 2*s0*(-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2 + s0, s7)), cuda:0), FakeTensor(FakeTensor(..., device='meta',
size=(s0, (-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2**2 + 2*(-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2 + 1, s7)), cuda:0)), **{}):
Cannot call sizes() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 55, in _run_node
return getattr(args[0], node.target)(*args[1:], **kwargs)
RuntimeError: Cannot call sizes() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 807, in CALL_FUNCTION_EX
self.call_function(fn, argsvars.items, kwargsvars.items)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 183, in call_function
tx.call_function(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/misc.py", line 571, in call_function
return self.obj.call_method(tx, self.name, args, kwargs).add_options(self)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 612, in call_method
return self.__class__.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running reshape_as(*(FakeTensor(FakeTensor(..., device='meta',
size=(s0*(-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2**2 + 2*s0*(-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2 + s0, s7)), cuda:0), FakeTensor(FakeTensor(..., device='meta',
size=(s0, (-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2**2 + 2*(-s1 + (-s1 + (-s1 + (-s1 + s2 + 2)//2 + 3)//2 + 3)//2 + 3)//2 + 1, s7)), cuda:0)), **{}):
Cannot call sizes() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval levit_128 FAIL
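Note on the failure above: the failing call is Tensor.reshape_as on fake tensors with symbolic sizes, and reshape_as needs the target's concrete sizes(), which symbolic fake tensors cannot provide at this commit. Below is a hedged sketch of the pattern together with a SymInt-friendly spelling of the same reshape; whether the alternative spelling avoids this particular failure on this build is an assumption, not something the log shows.

import torch

# Illustrative only: `flat.reshape_as(ref)` is the call that fails in the
# trace above; passing the (possibly symbolic) shape tuple expresses the
# same reshape without calling sizes() via reshape_as.
def unflatten_like(flat, ref):
    # flat.reshape_as(ref)        # the failing form under symbolic shapes
    return flat.reshape(ref.shape)

x = torch.randn(6, 8)
ref = torch.randn(2, 3, 8)
print(unflatten_like(x, ref).shape)   # torch.Size([2, 3, 8])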
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running mixer_b16_224...
cuda eval mixer_b16_224 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running mixnet_l...
cuda eval mixnet_l PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running mnasnet_100...
cuda eval mnasnet_100 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running mobilenetv2_100...
cuda eval mobilenetv2_100 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running mobilenetv3_large_100...
cuda eval mobilenetv3_large_100 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running mobilevit_s...
[2022-10-30 20:12:21,501] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function truediv>
args[0]: 32
args[1]: 2
ERROR:common:AttributeError: 'Integer' object has no attribute 'device'
target: <built-in function truediv>
args[0]: 32
args[1]: 2
While executing %truediv : [#users=1] = call_function[target=operator.truediv](args = (%sym_size_1, 2), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'Integer' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1559, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/byobnet.py", line 1551, in forward_features
x = self.stages(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/mobilevit.py", line 201, in forward
def forward(self, x: torch.Tensor) -> torch.Tensor:
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'Integer' object has no attribute 'device'
target: <built-in function truediv>
args[0]: 32
args[1]: 2
While executing %truediv : [#users=1] = call_function[target=operator.truediv](args = (%sym_size_1, 2), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of following error
cuda eval mobilevit_s FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running nfnet_l0...
ERROR:common:Failed running reshape_as(*(FakeTensor(FakeTensor(..., device='meta', size=(1, 16, s1**3)), cuda:0), FakeTensor(Parameter(FakeTensor(..., device='meta', size=(16, s1, s1, s1), requires_grad=True)), cuda:0)), **{}):
Cannot call sizes() on tensor with symbolic sizes/strides
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 55, in _run_node
return getattr(args[0], node.target)(*args[1:], **kwargs)
RuntimeError: Cannot call sizes() on tensor with symbolic sizes/strides
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 807, in CALL_FUNCTION_EX
self.call_function(fn, argsvars.items, kwargsvars.items)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 183, in call_function
tx.call_function(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/misc.py", line 571, in call_function
return self.obj.call_method(tx, self.name, args, kwargs).add_options(self)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 612, in call_method
return self.__class__.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running reshape_as(*(FakeTensor(FakeTensor(..., device='meta', size=(1, 16, s1**3)), cuda:0), FakeTensor(Parameter(FakeTensor(..., device='meta', size=(16, s1, s1, s1), requires_grad=True)), cuda:0)), **{}):
Cannot call sizes() on tensor with symbolic sizes/strides
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval nfnet_l0 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running pit_b_224...
ERROR:common:s4 is needed but not added
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/timm_models.py", line 314, in forward_pass
return mod(*inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/pit.py", line 261, in forward
x = self.forward_features(x)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/pit.py", line 236, in forward_features
x, cls_tokens = self.transformers((x, cls_tokens))
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/pit.py", line 73, in forward
x = module(x)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/pit.py", line 114, in forward
x, cls_tokens = self.pool(x, cls_tokens)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/timm/models/pit.py", line 127, in forward
def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 480, in codegen
assert shape in added, f"{shape} is needed but not added"
AssertionError: s4 is needed but not added
TorchDynamo optimized model failed to run because of following error
cuda eval pit_b_224 FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running pnasnet5large...
ERROR:common:Failed running <built-in method clamp of type object at 0x7eff66369b20>(*(s5 - (-s1 + s2)//2 + 2*ceiling((-s1 + s2)//2/2 + 1/2) - 3,), **{'min': 0}):
clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
TypeError: clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 807, in CALL_FUNCTION_EX
self.call_function(fn, argsvars.items, kwargsvars.items)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/nn_module.py", line 221, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 344, in call_function
result = handler(tx, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 398, in _call_min_max
result = variables.TorchVariable(torch.clamp).call_function(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/torch.py", line 404, in call_function
tensor_variable = TensorVariable.create(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 200, in create
example_value = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <built-in method clamp of type object at 0x7eff66369b20>(*(s5 - (-s1 + s2)//2 + 2*ceiling((-s1 + s2)//2/2 + 1/2) - 3,), **{'min': 0}):
clamp() received an invalid combination of arguments - got (SymInt, min=int), but expected one of:
* (Tensor input, Tensor min, Tensor max, *, Tensor out)
* (Tensor input, Number min, Number max, *, Tensor out)
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of following error
cuda eval pnasnet5large FAIL
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running poolformer_m36...
cuda eval poolformer_m36 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running regnety_002...
cuda eval regnety_002 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running repvgg_a2...
cuda eval repvgg_a2 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running res2net101_26w_4s...
cuda eval res2net101_26w_4s PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running res2net50_14w_8s...
cuda eval res2net50_14w_8s PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running res2next50...
cuda eval res2next50 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running resmlp_12_224...
cuda eval resmlp_12_224 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running resnest101e...
cuda eval resnest101e PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running rexnet_100...
cuda eval rexnet_100 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running sebotnet33ts_256...
cuda train sebotnet33ts_256 FAIL (TIMEOUT)
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running selecsls42b...
cuda eval selecsls42b PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running spnasnet_100...
cuda eval spnasnet_100 PASS
/data/users/ezyang/vision/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
warn(f"Failed to load image Python extension: {e}")
Running swin_base_patch4_window7_224...
ERROR:common:Failed running view(*(FakeTensor(FakeTensor(..., device='meta', size=(64*s0, 7, 7, 128)), cuda:0), s0, 8, 8, 7, 7, -1), **{}):
view() received an invalid combination of arguments - got (SymFloat, int, int, int, int, int), but expected one of:
* (torch.dtype dtype)
* (tuple of ints size)
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 55, in _run_node
return getattr(args[0], node.target)(*args[1:], **kwargs)
TypeError: view() received an invalid combination of arguments - got (SymFloat, int, int, int, int, int), but expected one of:
* (torch.dtype dtype)
* (tuple of ints size)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symboli