@ezyang — Gist 1be7475e7303fb9190bcb7729cbba257, created October 31, 2022 04:37
Sweep logs for symbolic-shapes --accuracy --backend inductor --training (TORCHDYNAMO_DYNAMIC_SHAPES=1) - Sun Oct 30 20:55:31 PDT 2022
Running BERT_pytorch...
ERROR:common:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/bert.py", line 43, in forward
x = self.embedding(x, segment_info)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/bert.py", line 32, in forward
x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
File "/data/users/ezyang/benchmark/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/bert.py", line 32, in <graph break in forward>
x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 123, in compile_fx_inner
compiled_fn = graph.compile_to_fn()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 346, in compile_to_fn
return self.compile_to_module().call
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 332, in compile_to_module
code = self.codegen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 323, in codegen
self.wrapper_code = WrapperCodeGen()
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/codegen/wrapper.py", line 240, in __init__
V.graph.sizevars.codegen(self.prefix, V.graph.graph_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/sizevars.py", line 481, in codegen
assert not needed
AssertionError
TorchDynamo optimized model failed to run because of the following error
cuda train BERT_pytorch FAIL
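The cited user code is the three-way embedding sum at bert_pytorch/model/embedding/bert.py:32. Below is a minimal sketch of that pattern with invented sizes (the real model uses a sinusoidal positional embedding; a plain nn.Embedding stand-in keeps the sketch self-contained). Under TORCHDYNAMO_DYNAMIC_SHAPES=1, compiling this graph left free size symbols that WrapperCodeGen had no bindings for, which is what trips `assert not needed` in sizevars.codegen.

```python
import torch
import torch.nn as nn

class BertEmbeddingSketch(nn.Module):
    # hypothetical stand-in for bert_pytorch's BERTEmbedding; all sizes invented
    def __init__(self, vocab=20000, dim=768):
        super().__init__()
        self.token = nn.Embedding(vocab, dim)
        self.position = nn.Embedding(vocab, dim)  # real model: sinusoidal positions
        self.segment = nn.Embedding(3, dim)

    def forward(self, sequence, segment_label):
        # the summed lookups cited at embedding/bert.py:32
        return self.token(sequence) + self.position(sequence) + self.segment(segment_label)

emb = BertEmbeddingSketch()
seq = torch.randint(0, 20000, (4, 128))
seg = torch.randint(0, 3, (4, 128))
out = emb(seq, seg)  # fine in eager; the sweep's inductor compile hit the assert
```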
Running Background_Matting...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 256.000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 256.000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 256.000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_108,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_model_al_out': <class 'torch.nn.modules.container.Sequential'>, 'self_model_al_out_0': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Background_Matting/networks.py", line 104, in forward
al_out=self.model_al_out(out_dec_al)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 256.000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 256.000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/Background_Matting/networks.py", line 91, in forward
def forward(self, image,back,seg,multi):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 256.000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 256.000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 256.000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_108,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_model_al_out': <class 'torch.nn.modules.container.Sequential'>, 'self_model_al_out_0': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Background_Matting/networks.py", line 104, in forward
al_out=self.model_al_out(out_dec_al)
TorchDynamo optimized model failed to run because of the following error
cuda train Background_Matting FAIL
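Both this failure and the Super_SloMo one below reduce to the same lowering: a float scale_factor upsample whose decomposition emits aten.arange with a sympy Float `end` (256.0 here, 22.0 there), which the inductor fallback cannot cast back to a plain Scalar. A hedged repro sketch, with invented tensor sizes, using the torch._dynamo.optimize entry point this build exposes:

```python
import torch
import torch._dynamo
import torch.nn as nn

# mirrors the module stack in the log: a Sequential ending in nn.Upsample
up = nn.Sequential(nn.Upsample(scale_factor=2.0, mode="bilinear"))
x = torch.randn(1, 3, 128, 128)  # 128 * 2.0 -> the sympy Float 256.000... above

compiled = torch._dynamo.optimize("inductor")(up)
# eager up(x) works; with dynamic shapes enabled, compiled(x) is the call
# that would reach the aten.arange.default fallback shown in this traceback
```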
WARNING:root:DALLE2_pytorch failed to load
Eager model failed to run
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 903, in validate_model
self.model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in forward_and_backward_pass
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1746, in main
device, name, model, example_inputs, batch_size = runner.load_model(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 282, in load_model
self.validate_model(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 905, in validate_model
raise NotImplementedError("Eager model failed to run")
NotImplementedError: Eager model failed to run
Running LearningToPaint...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/benchmark/torchbenchmark/models/LearningToPaint/baseline/DRL/actor.py", line 111, in forward
x = x.view(x.size(0), -1)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/benchmark/torchbenchmark/models/LearningToPaint/baseline/DRL/actor.py", line 111, in forward
x = x.view(x.size(0), -1)
TorchDynamo optimized model failed to run because of the following error
cuda train LearningToPaint FAIL
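This AttributeError recurs verbatim for alexnet, attention_is_all_you_need_pytorch, and densenet121 below: the backward graph gains a sym_size placeholder whose example value is a plain Python int, and static_sizes_strides unconditionally calls .size() on it. The user-level pattern is just a flatten keyed off a traced dimension; a minimal sketch with invented sizes:

```python
import torch
import torch.nn as nn

class ActorHeadSketch(nn.Module):
    # hypothetical stand-in for the DRL actor's classifier head
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(64 * 8 * 8, 10)

    def forward(self, x):
        # the line cited at actor.py:111; x.size(0) becomes a sym_size input
        x = x.view(x.size(0), -1)
        return self.fc(x)

net = ActorHeadSketch()
out = net(torch.randn(4, 64, 8, 8))
out.sum().backward()  # eager backward is fine; compiling it hit static_sizes_strides
```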
Running Super_SloMo...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 22.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 22.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 22.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_12,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_flowComp': <class 'torchbenchmark.models.Super_SloMo.slomo_model.UNet'>, 'self_flowComp_up1': <class 'torchbenchmark.models.Super_SloMo.slomo_model.up'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 130, in forward
x = F.interpolate(x, scale_factor=2., mode='bilinear')
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 204, in forward
x = self.up1(x, s5)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/model_wrapper.py", line 28, in forward
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 22.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 22.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/model_wrapper.py", line 26, in forward
def forward(self, trainFrameIndex, I0, I1, IFrame):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 22.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 22.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 22.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_12,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_flowComp': <class 'torchbenchmark.models.Super_SloMo.slomo_model.UNet'>, 'self_flowComp_up1': <class 'torchbenchmark.models.Super_SloMo.slomo_model.up'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 130, in forward
x = F.interpolate(x, scale_factor=2., mode='bilinear')
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/slomo_model.py", line 204, in forward
x = self.up1(x, s5)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/Super_SloMo/model_wrapper.py", line 28, in forward
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
TorchDynamo optimized model failed to run because of the following error
cuda train Super_SloMo FAIL
Running alexnet...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=1] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/alexnet.py", line 50, in forward
x = torch.flatten(x, 1)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=1] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/alexnet.py", line 50, in forward
x = torch.flatten(x, 1)
TorchDynamo optimized model failed to run because of the following error
cuda train alexnet FAIL
Running attention_is_all_you_need_pytorch...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_trg_word_prj': <class 'torch.nn.modules.linear.Linear'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 173, in <graph break in forward>
seq_logit = self.trg_word_prj(dec_output) * self.x_logit_scale
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_src_word_emb': <class 'torch.nn.modules.sparse.Embedding'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 71, in forward
enc_output = self.dropout(self.position_enc(self.src_word_emb(src_seq)))
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_trg_word_prj': <class 'torch.nn.modules.linear.Linear'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 173, in <graph break in forward>
seq_logit = self.trg_word_prj(dec_output) * self.x_logit_scale
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_src_word_emb': <class 'torch.nn.modules.sparse.Embedding'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py", line 71, in forward
enc_output = self.dropout(self.position_enc(self.src_word_emb(src_seq)))
TorchDynamo optimized model failed to run because of the following error
cuda train attention_is_all_you_need_pytorch FAIL
Running dcgan...
cuda train dcgan PASS
Running densenet121...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/densenet.py", line 217, in forward
out = torch.flatten(out, 1)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/densenet.py", line 217, in forward
out = torch.flatten(out, 1)
TorchDynamo optimized model failed to run because of the following error
cuda train densenet121 FAIL
WARNING:root:detectron2_fcos_r_50_fpn failed to load
FCOS train is not supported by upstream detectron2. See GH Issue: https://github.com/facebookresearch/detectron2/issues/4369.
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1746, in main
device, name, model, example_inputs, batch_size = runner.load_model(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 251, in load_model
benchmark = benchmark_cls(
File "/data/users/ezyang/benchmark/torchbenchmark/util/model.py", line 18, in __call__
obj = type.__call__(cls, *args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/detectron2_fcos_r_50_fpn/__init__.py", line 15, in __init__
super().__init__(variant="COCO-Detection/fcos_R_50_FPN_1x.py", test=test, device=device,
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/detectron2/model_factory.py", line 100, in __init__
loader = self.setup_train(cfg, args)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/detectron2/model_factory.py", line 110, in setup_train
raise NotImplementedError("FCOS train is not supported by upstream detectron2. " \
NotImplementedError: FCOS train is not supported by upstream detectron2. See GH Issue: https://github.com/facebookresearch/detectron2/issues/4369.
WARNING:root:detectron2_maskrcnn_r_50_c4 failed to load
Eager model failed to run
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 903, in validate_model
self.model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 336, in forward_and_backward_pass
loss = self.compute_loss(pred)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 326, in compute_loss
return reduce_to_scalar_loss(pred)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/testing.py", line 87, in reduce_to_scalar_loss
return sum([reduce_to_scalar_loss(x) for x in out]) / len(out)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/testing.py", line 87, in <listcomp>
return sum([reduce_to_scalar_loss(x) for x in out]) / len(out)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/testing.py", line 97, in reduce_to_scalar_loss
return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/testing.py", line 97, in <listcomp>
return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/testing.py", line 102, in reduce_to_scalar_loss
raise NotImplementedError("Don't know how to reduce", type(out))
NotImplementedError: ("Don't know how to reduce", <class 'detectron2.structures.instances.Instances'>)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1746, in main
device, name, model, example_inputs, batch_size = runner.load_model(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 282, in load_model
self.validate_model(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 905, in validate_model
raise NotImplementedError("Eager model failed to run")
NotImplementedError: Eager model failed to run
Running torchbench.py dlrm...
ERROR:common:Failed running <class 'range'>(*(9,), **{}):
'SymInt' object cannot be interpreted as an integer
(scroll up for backtrace)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 53, in _run_node
return node.target(*args, **kwargs)
TypeError: 'SymInt' object cannot be interpreted as an integer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 240, in catch_errors
return callback(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 437, in _convert_frame
result = inner_convert(frame, cache_size)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 112, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 319, in _convert_frame_assert
return _compile(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 374, in _compile
out_code = transform_code_object(code, transform)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/bytecode_transformation.py", line 341, in transform_code_object
transformations(instructions, code_options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/convert_frame.py", line 362, in transform
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1466, in run
super().run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 221, in call_function
return super().call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 191, in call_function
return super(UserFunctionVariable, self).call_function(tx, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/functions.py", line 62, in call_function
return tx.inline_user_function_return(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 296, in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1538, in inline_call
return cls.inline_call_(parent, func, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 1592, in inline_call_
tracer.run()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 352, in run
and self.step()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 325, in step
getattr(self, inst.opname)(inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 177, in wrapper
return inner_fn(self, inst)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 770, in CALL_FUNCTION
self.call_function(fn, args, {})
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/symbolic_convert.py", line 267, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/builtin.py", line 369, in call_function
return DynamicShapeVariable.create(tx, proxy, None, **options)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 634, in create
dyn_shape = _get_fake_value(proxy.node, tx)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 131, in _get_fake_value
return wrap_fake_exception(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 709, in wrap_fake_exception
return fn()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 132, in <lambda>
lambda: _run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/variables/tensor.py", line 62, in _run_node
raise RuntimeError(
RuntimeError: Failed running <class 'range'>(*(9,), **{}):
'SymInt' object cannot be interpreted as an integer
(scroll up for backtrace)
TorchDynamo optimized model failed to run because of the following error
cuda train dlrm FAIL
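Unlike the inductor failures above, dlrm dies during Dynamo tracing itself: a Python range() call receives a traced SymInt, and CPython's range() only accepts exact integers. A sketch of the shape of the problem, assuming the 9 came from a traced dimension (the real call site is not shown in this log):

```python
import torch

def per_feature_sum(x):
    total = x.new_zeros(x.size(0))
    # under TORCHDYNAMO_DYNAMIC_SHAPES=1, x.size(1) is a SymInt; passing it
    # to range() raises "'SymInt' object cannot be interpreted as an integer"
    for i in range(x.size(1)):
        total = total + x[:, i]
    return total

print(per_feature_sum(torch.randn(4, 9)))  # eager run; 9 matches range(*(9,)) above
```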
/data/users/ezyang/pytorch-tmp/torch/utils/tensorboard/__init__.py:4: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
if not hasattr(tensorboard, "__version__") or LooseVersion(
/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/gym/core.py:317: DeprecationWarning: WARN: Initializing wrapper in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.
deprecation(
Running torchbench.py drq...
cuda train drq FAIL (TIMEOUT)
Running torchbench.py fastNLP_Bert...
[2022-10-30 21:11:19,106] torch._inductor.ir: [WARNING] DeviceCopy
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embeddings': <class 'fastNLP.modules.encoder.bert.BertEmbeddings'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 230, in forward
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 512, in forward
embedding_output = self.embeddings(input_ids, token_type_ids)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/models/bert.py", line 265, in forward
sequence_output = self.bert(words)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 137, in forward
outputs = self.model(words)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 445, in forward
max_word_piece_length = batch_word_pieces_length.sum(dim=-1).max().item() # the length of the word pieces (including padding)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 462, in <graph break in forward>
word_indexes = words.cpu().numpy()
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/embeddings/bert_embedding.py", line 482, in <graph break in forward>
bert_outputs, pooled_cls = self.encoder(word_pieces, token_type_ids=token_type_ids, attention_mask=attn_masks,
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 480, in forward
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embeddings': <class 'fastNLP.modules.encoder.bert.BertEmbeddings'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 230, in forward
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/fastNLP/modules/encoder/bert.py", line 512, in forward
embedding_output = self.embeddings(input_ids, token_type_ids)
TorchDynamo optimized model failed to run because of the following error
cuda train fastNLP_Bert FAIL
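Note on the failure above: with TORCHDYNAMO_DYNAMIC_SHAPES=1, seq_length traces to a symbolic size (s1), and Inductor's fallback for aten.arange.default re-invokes the ATen operator with a Symbol that no arange overload can cast to a number. A minimal repro sketch under those assumptions (hypothetical, not taken from the sweep; requires a CUDA device and the same build):

    import torch
    import torch._dynamo as dynamo

    def position_ids(input_ids):
        seq_length = input_ids.size(1)  # a SymInt (s1) when dynamic shapes are on
        return torch.arange(seq_length, dtype=torch.long, device=input_ids.device)

    opt = dynamo.optimize("inductor")(position_ids)
    opt(torch.zeros(2, 128, dtype=torch.long, device="cuda"))
    # expected to raise the LoweringException shown above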
Running torchbench.py functorch_dp_cifar10...
cuda train functorch_dp_cifar10 FAIL (TIMEOUT)
Running torchbench.py functorch_maml_omniglot...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_12': <class 'torch.nn.modules.flatten.Flatten'>}
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_12': <class 'torch.nn.modules.flatten.Flatten'>}
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
TorchDynamo optimized model failed to run because of the following error
cuda train functorch_maml_omniglot FAIL
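Note: every 'int' object has no attribute 'size' failure in this sweep (functorch_maml_omniglot here, then hf_Albert, hf_Bert, hf_DistilBert below) dies in the same helper: the backward graph receives sym_size placeholders whose example values are plain Python ints, while static_sizes_strides assumes each placeholder example is a Tensor. A sketch of that assumption, plus a hypothetical guard (illustrative only, not upstream code):

    import sympy
    import torch

    def static_sizes_strides(ex):
        # upstream iterates ex.size()/ex.stride(), so a sym_size placeholder
        # whose example value is an int raises AttributeError
        if isinstance(ex, int):  # hypothetical guard for scalar placeholders
            return [], []
        size = [sympy.Integer(i) for i in ex.size()]
        stride = [sympy.Integer(i) for i in ex.stride()]
        return size, stride

    print(static_sizes_strides(torch.randn(2, 3)))  # ([2, 3], [3, 1])
    print(static_sizes_strides(512))                # ([], []) rather than AttributeError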
Running torchbench.py hf_Albert...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_predictions': <class 'transformers.models.albert.modeling_albert.AlbertMLMHead'>, 'self_predictions_dense': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 883, in forward
hidden_states = self.dense(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 1003, in <graph break in forward>
prediction_scores = self.predictions(sequence_outputs)
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_embedding_hidden_mapping_in': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 470, in forward
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_predictions': <class 'transformers.models.albert.modeling_albert.AlbertMLMHead'>, 'self_predictions_dense': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 883, in forward
hidden_states = self.dense(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 1003, in <graph break in forward>
prediction_scores = self.predictions(sequence_outputs)
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_embedding_hidden_mapping_in': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/albert/modeling_albert.py", line 470, in forward
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
TorchDynamo optimized model failed to run because of the following error
cuda train hf_Albert FAIL
Running torchbench.py hf_Bart...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 134, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 801, in forward
embed_pos = self.embed_positions(input_shape)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1353, in forward
outputs = self.model(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 1222, in forward
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 735, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_embed_positions': <class 'transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 134, in forward
positions = torch.arange(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bart/modeling_bart.py", line 801, in forward
embed_pos = self.embed_positions(input_shape)
TorchDynamo optimized model failed to run because of the following error
cuda train hf_Bart FAIL
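Note: hf_Bart (and hf_GPT2 below) hit the aten.arange.start variant of the same lowering gap, with a symbolic end bound. A hypothetical sketch in the spirit of BartLearnedPositionalEmbedding.forward:

    import torch
    import torch._dynamo as dynamo

    def positions(input_ids, past_key_values_length=0):
        seq_len = input_ids.size(1)  # symbolic (s1) under dynamic shapes
        return torch.arange(
            past_key_values_length,
            past_key_values_length + seq_len,
            dtype=torch.long,
            device=input_ids.device,
        )

    dynamo.optimize("inductor")(positions)(
        torch.zeros(4, 1024, dtype=torch.long, device="cuda")
    )  # expected: LoweringException on aten.arange.start, as above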
Running torchbench.py hf_Bert...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_cls': <class 'transformers.models.bert.modeling_bert.BertOnlyMLMHead'>, 'self_cls_predictions': <class 'transformers.models.bert.modeling_bert.BertLMPredictionHead'>, 'self_cls_predictions_transform': <class 'transformers.models.bert.modeling_bert.BertPredictionHeadTransform'>, 'self_cls_predictions_transform_dense': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 675, in forward
hidden_states = self.dense(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 696, in forward
hidden_states = self.transform(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 707, in forward
prediction_scores = self.predictions(sequence_output)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1366, in <graph break in forward>
prediction_scores = self.cls(sequence_output)
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_layer_0': <class 'transformers.models.bert.modeling_bert.BertLayer'>, 'self_layer_0_attention': <class 'transformers.models.bert.modeling_bert.BertAttention'>, 'self_layer_0_attention_self': <class 'transformers.models.bert.modeling_bert.BertSelfAttention'>, 'self_layer_0_attention_self_query': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 289, in forward
mixed_query_layer = self.query(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 423, in forward
self_outputs = self.self(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 493, in forward
self_attention_outputs = self.attention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 607, in forward
layer_outputs = layer_module(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_cls': <class 'transformers.models.bert.modeling_bert.BertOnlyMLMHead'>, 'self_cls_predictions': <class 'transformers.models.bert.modeling_bert.BertLMPredictionHead'>, 'self_cls_predictions_transform': <class 'transformers.models.bert.modeling_bert.BertPredictionHeadTransform'>, 'self_cls_predictions_transform_dense': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 675, in forward
hidden_states = self.dense(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 696, in forward
hidden_states = self.transform(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 707, in forward
prediction_scores = self.predictions(sequence_output)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 1366, in <graph break in forward>
prediction_scores = self.cls(sequence_output)
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_layer_0': <class 'transformers.models.bert.modeling_bert.BertLayer'>, 'self_layer_0_attention': <class 'transformers.models.bert.modeling_bert.BertAttention'>, 'self_layer_0_attention_self': <class 'transformers.models.bert.modeling_bert.BertSelfAttention'>, 'self_layer_0_attention_self_query': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 289, in forward
mixed_query_layer = self.query(hidden_states)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 423, in forward
self_outputs = self.self(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 493, in forward
self_attention_outputs = self.attention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/bert/modeling_bert.py", line 607, in forward
layer_outputs = layer_module(
TorchDynamo optimized model failed to run because of the following error
cuda train hf_Bert FAIL
Running torchbench.py hf_BigBird...
ERROR:common:(False, s0, s1)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2462, in forward
outputs = self.bert(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2104, in forward
blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/big_bird/modeling_big_bird.py", line 2185, in create_masks_for_block_sparse_attn
@staticmethod
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 336, in aot_dispatch_base
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 267, in output
assert all(
AssertionError: (False, s0, s1)
While executing return (False, sym_size, sym_size_1)
Original traceback:
None
TorchDynamo optimized model failed to run because of the following error
cuda train hf_BigBird FAIL
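Note: hf_BigBird fails for a different reason: create_masks_for_block_sparse_attn returns Python scalars, so the traced graph's outputs are a bool plus two symbolic sizes, and GraphLowering.output asserts that every output is a tensor. A hypothetical user-level repro of a graph with non-tensor outputs:

    import torch
    import torch._dynamo as dynamo

    @dynamo.optimize("inductor")
    def f(x):
        # scalars derived from symbolic sizes become graph outputs like
        # (False, s0, s1), which Inductor's output handler rejects
        return x.size(0) == 0, x.size(0), x.size(1)

    f(torch.randn(4, 8, device="cuda"))
    # expected: AssertionError: (False, s0, s1)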
Running torchbench.py hf_DistilBert...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_vocab_transform': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 659, in <graph break in forward>
prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_transformer': <class 'transformers.models.distilbert.modeling_distilbert.Transformer'>, 'self_transformer_layer_0': <class 'transformers.models.distilbert.modeling_distilbert.TransformerBlock'>, 'self_transformer_layer_0_attention': <class 'transformers.models.distilbert.modeling_distilbert.MultiHeadSelfAttention'>, 'self_transformer_layer_0_attention_q_lin': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 207, in forward
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 283, in forward
sa_output = self.attention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 345, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 567, in <graph break in forward>
return self.transformer(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_vocab_transform': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 659, in <graph break in forward>
prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
Gradient addition node due to multiple use of tensor around:
Module stack: {'self_transformer': <class 'transformers.models.distilbert.modeling_distilbert.Transformer'>, 'self_transformer_layer_0': <class 'transformers.models.distilbert.modeling_distilbert.TransformerBlock'>, 'self_transformer_layer_0_attention': <class 'transformers.models.distilbert.modeling_distilbert.MultiHeadSelfAttention'>, 'self_transformer_layer_0_attention_q_lin': <class 'torch.nn.modules.linear.Linear'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 207, in forward
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 283, in forward
sa_output = self.attention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 345, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 567, in <graph break in forward>
return self.transformer(
TorchDynamo optimized model failed to run because of the following error
cuda train hf_DistilBert FAIL
Running torchbench.py hf_GPT2...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1048, in forward
transformer_outputs = self.transformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 738, in forward
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 793, in forward
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
TorchDynamo optimized model failed to run because of the following error
cuda train hf_GPT2 FAIL
Running torchbench.py hf_GPT2_large...
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 349, in <module>
main(TorchBenchmarkRunner(), original_dir)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1775, in main
runner.run_one_model(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 768, in inner
return fn(self, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1213, in run_one_model
status = self.check_accuracy(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1023, in check_accuracy
correct_rerun_result = self.run_n_iterations(
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 946, in run_n_iterations
return self.model_iter_fn(mod, inputs, collect_outputs=True)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in forward_and_backward_pass
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1048, in forward
transformer_outputs = self.transformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 891, in forward
outputs = block(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 428, in forward
feed_forward_hidden_states = self.mlp(hidden_states)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 356, in forward
hidden_states = self.act(hidden_states)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/activations.py", line 34, in forward
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB (GPU 0; 39.59 GiB total capacity; 37.54 GiB already allocated; 1.44 MiB free; 38.41 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
cuda train hf_GPT2_large FAIL
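hf_GPT2_large is a plain capacity failure rather than a dynamic-shapes bug: the tanh-GELU in the MLP tries to allocate activation buffers on a card already at 37.54 GiB of 39.59 GiB. The allocator hint in the message can be tried, though it only mitigates fragmentation and does not shrink the real working set, so the model may still OOM. A sketch, assuming the variable is set before CUDA is first initialized:

```python
import os

# Must be set before torch initializes CUDA; caps the size of cached blocks
# the caching allocator will split, which can reduce fragmentation-driven OOMs.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch  # noqa: E402  (imported after the env var on purpose)
```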
Running torchbench.py hf_Longformer...
[2022-10-30 21:21:38,092] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 21:21:38,122] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
[2022-10-30 21:21:39,013] torch._dynamo.variables.builtin: [WARNING] incorrect arg count <bound method BuiltinVariable._call_min_max of BuiltinVariable(max)> missing a required argument: 'b'
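These warnings come from dynamo's builtin handling of `max`: the specialized handler expects the two-argument form `max(a, b)`, and binding fails for the one-argument iterable form. A plausible mechanism sketch (the `_call_min_max` stub here is a stand-in; the real handler lives in torch/_dynamo/variables/builtin.py):

```python
import inspect

def _call_min_max(a, b):  # two-arg specialization, as in the warning text
    pass

sig = inspect.signature(_call_min_max)
try:
    sig.bind("only-one-arg")  # a max(iterable)-style call binds one positional
except TypeError as e:
    print(f"incorrect arg count ... {e}")  # "missing a required argument: 'b'"
```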
[2022-10-30 21:21:42,296] torch._inductor.ir: [WARNING] Using FallbackKernel: aten.cumsum
[2022-10-30 21:21:44,860] torch._inductor.graph: [WARNING] Creating implicit fallback for:
target: <built-in function eq>
args[0]: 768
args[1]: 768
ERROR:common:AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: 768
args[1]: 768
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size_2, 768), kwargs = {})
Original traceback:
None
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2922, in create
example_output.device,
AttributeError: 'bool' object has no attribute 'device'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1813, in forward
outputs = self.longformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1696, in forward
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1715, in <graph break in forward>
encoder_outputs = self.encoder(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1265, in forward
is_global_attn = is_index_global_attn.flatten().any().item()
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1297, in <graph break in forward>
layer_outputs = layer_module(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1221, in forward
self_attn_outputs = self.attention(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 1157, in forward
self_outputs = self.self(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/longformer/modeling_longformer.py", line 542, in forward
def forward(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AttributeError: 'bool' object has no attribute 'device'
target: <built-in function eq>
args[0]: 768
args[1]: 768
While executing %eq : [#users=1] = call_function[target=operator.eq](args = (%sym_size_2, 768), kwargs = {})
Original traceback:
None
TorchDynamo optimized model failed to run because of the following error
cuda train hf_Longformer FAIL
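The Longformer failure is a different fallback bug: `operator.eq` between a symbolic size and the constant 768 evaluates to a plain Python bool, and `FallbackKernel.create` unconditionally reads `.device` off the example output. A sketch of the mismatch and the kind of guard the lowering would need (illustrative only, not the actual fix):

```python
import torch

example_output = (768 == 768)  # operator.eq on two sizes -> bool, not Tensor
assert isinstance(example_output, bool)

# ir.py line 2922 does example_output.device unconditionally; only tensors
# carry a device, so non-tensor results need a branch along these lines:
device = example_output.device if isinstance(example_output, torch.Tensor) else None
print(device)  # None
```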
Running torchbench.py hf_Reformer...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 249, in forward
position_ids = torch.arange(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 2397, in forward
reformer_outputs = self.reformer(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 2063, in forward
least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 2100, in <graph break in forward>
embedding_output = self.embeddings(
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 239, in forward
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.start
args[0]: 0
args[1]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, %add), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/reformer/modeling_reformer.py", line 249, in forward
position_ids = torch.arange(
TorchDynamo optimized model failed to run because of the following error
cuda train hf_Reformer FAIL
Running torchbench.py hf_T5...
WARNING:common:fp64 golden ref was not generated for hf_T5
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=2] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1537, in forward
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=2] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
TorchDynamo optimized model failed to run because of the following error
cuda train hf_T5 FAIL
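All three T5 variants (hf_T5 here, hf_T5_base and hf_T5_large below) die on the same line of `compute_bias`: the relative-position bias builds an arange over the symbolic query length, which again lands in the ATen fallback. The offending pattern, reduced to a standalone sketch (concrete ints stand in for what tracing sees as SymInts):

```python
import torch

query_length, key_length, device = 8, 8, "cpu"  # symbolic under dynamic shapes

# transformers/models/t5/modeling_t5.py, compute_bias: both aranges take a
# (traced-symbolic) length, so inductor's fallback re-dispatches to ATen.
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position  # (query_length, key_length)
```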
Running torchbench.py hf_T5_base...
WARNING:common:fp64 golden ref was not generated for hf_T5_base
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=2] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1537, in forward
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=2] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
TorchDynamo optimized model failed to run because of the following error
cuda train hf_T5_base FAIL
Running torchbench.py hf_T5_large...
WARNING:common:fp64 golden ref was not generated for hf_T5_large
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=2] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/util/framework/huggingface/model_factory.py", line 41, in forward
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1537, in forward
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Symbol'.
Position: 1
Value: s1
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast s1 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: s1
kwargs: {'dtype': torch.int64, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=2] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_2,), kwargs = {dtype: torch.int64, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_encoder': <class 'transformers.models.t5.modeling_t5.T5Stack'>, 'self_encoder_block_0': <class 'transformers.models.t5.modeling_t5.T5Block'>, 'sub0_0': <class 'transformers.models.t5.modeling_t5.T5LayerSelfAttention'>, 'self_encoder_block_0_layer_0_SelfAttention': <class 'transformers.models.t5.modeling_t5.T5Attention'>}
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 423, in compute_bias
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 519, in forward
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 570, in forward
attention_output = self.SelfAttention(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 664, in forward
self_attention_outputs = self.layer[0](
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1033, in forward
layer_outputs = layer_module(
| File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py", line 1601, in forward
encoder_outputs = self.encoder(
TorchDynamo optimized model failed to run because of the following error
cuda train hf_T5_large FAIL
Running torchbench.py lennard_jones...
cuda train lennard_jones PASS
Running torchbench.py maml_omniglot...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=1] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_12': <class 'torch.nn.modules.flatten.Flatten'>}
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=1] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_12': <class 'torch.nn.modules.flatten.Flatten'>}
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/container.py", line 204, in forward
input = module(input)
TorchDynamo optimized model failed to run because of the following error
cuda train maml_omniglot FAIL
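The AttributeError above comes from static_sizes_strides() assuming every placeholder example is a Tensor, while with dynamic shapes a sym_size placeholder carries a plain Python int. A minimal standalone sketch of that failure shape (an illustration, not the Inductor code path itself):

import sympy
import torch

def static_sizes_strides(ex):
    # Assumes `ex` is a Tensor; an int example value from a sym_size
    # placeholder has no .size()/.stride(), hence the AttributeError.
    size = [sympy.Integer(i) for i in ex.size()]
    stride = [sympy.Integer(i) for i in ex.stride()]
    return size, stride

static_sizes_strides(torch.empty(2, 3))  # works: ([2, 3], [3, 1])
static_sizes_strides(7)                  # AttributeError: 'int' object has no attribute 'size'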
Running torchbench.py mnasnet1_0...
ERROR:common:AssertionError:
target: aten.div.Scalar
args[0]: TensorBox(
ReinterpretView(
StorageBox(
MatrixMultiply(
name=buf0,
layout=FixedLayout('cuda', torch.float32, size=[s0, 1280], stride=[1280, 1]),
inputs=[InputBuffer(name='orig_tangents_1', layout=FixedLayout('cuda', torch.float32, size=[s0, s1], stride=[s1, 1])), InputBuffer(name='permute_1', layout=FixedLayout('cuda', torch.float32, size=[1000, 1280], stride=[1280, 1]))],
constant_args=(),
kwargs={},
output_view=None,
origins={mm}
)
),
FixedLayout('cuda', torch.float32, size=[2, 1280, 7, 7], stride=[1280, 1, 0, 0]),
origins={expand}
)
)
args[1]: 49
While executing %div : [#users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%expand, %mul_156), kwargs = {})
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/mnasnet.py", line 161, in forward
x = x.mean([2, 3])
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 3182, in div
dtype = get_promoted_dtype(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 138, in get_promoted_dtype
inps = [construct_input(arg) for arg in args]
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 138, in <listcomp>
inps = [construct_input(arg) for arg in args]
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 133, in construct_input
assert hasattr(inp, "get_dtype")
AssertionError
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AssertionError:
target: aten.div.Scalar
args[0]: TensorBox(
ReinterpretView(
StorageBox(
MatrixMultiply(
name=buf0,
layout=FixedLayout('cuda', torch.float32, size=[s0, 1280], stride=[1280, 1]),
inputs=[InputBuffer(name='orig_tangents_1', layout=FixedLayout('cuda', torch.float32, size=[s0, s1], stride=[s1, 1])), InputBuffer(name='permute_1', layout=FixedLayout('cuda', torch.float32, size=[1000, 1280], stride=[1280, 1]))],
constant_args=(),
kwargs={},
output_view=None,
origins={mm}
)
),
FixedLayout('cuda', torch.float32, size=[2, 1280, 7, 7], stride=[1280, 1, 0, 0]),
origins={expand}
)
)
args[1]: 49
While executing %div : [#users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%expand, %mul_156), kwargs = {})
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/mnasnet.py", line 161, in forward
x = x.mean([2, 3])
TorchDynamo optimized model failed to run because of the following error
cuda train mnasnet1_0 FAIL
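Here the lowering of aten.div.Scalar fails because args[1] is the plain Python number 49 (7*7, from x.mean([2, 3]) under symbolic shapes), and construct_input() asserts every argument has a get_dtype() method. A simplified stand-in for get_promoted_dtype that tolerates scalar operands, using torch.result_type (which accepts numbers as well as tensors):

import torch

def get_promoted_dtype(a, b):
    # torch.result_type handles Tensor/Number mixes directly, so a scalar
    # divisor does not need a .get_dtype() method.
    return torch.result_type(a, b)

print(get_promoted_dtype(torch.randn(2, 1280, 7, 7), 49))  # torch.float32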
Running torchbench.py mobilenet_v2...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/mobilenetv2.py", line 169, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/mobilenetv2.py", line 174, in forward
return self._forward_impl(x)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/mobilenetv2.py", line 169, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/mobilenetv2.py", line 174, in forward
return self._forward_impl(x)
TorchDynamo optimized model failed to run because of the following error
cuda train mobilenet_v2 FAIL
Running torchbench.py mobilenet_v2_quantized_qat...
WARNING:common:fp64 golden refs were not generated for mobilenet_v2_quantized_qat
[2022-10-30 21:28:29,841] torch._inductor.ir: [WARNING] Using FallbackKernel: aten._fused_moving_avg_obs_fq_helper_functional
[... the same FallbackKernel warning repeated 154 more times, timestamps 21:28:29,848 through 21:28:31,336 ...]
ERROR:common:name 's0' is not defined
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/graph_module.py", line 660, in call_wrapped
return self._wrapped_call(self, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/graph_module.py", line 279, in __call__
raise e
File "/data/users/ezyang/pytorch-tmp/torch/fx/graph_module.py", line 269, in __call__
return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "<eval_with_key>.5", line 4, in forward
def forward(self, x : torch.Tensor) -> torch.Tensor:
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 885, in new_func
return compiled_fn(args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 271, in g
return f(*args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 513, in compiled_function
return CompiledFunction.apply(*remove_dupe_args(args))
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 471, in forward
fw_outs = call_func_with_args(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 296, in call_func_with_args
out = normalize_as_list(f(args))
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 186, in run
return model(new_inputs)
File "/tmp/torchinductor_ezyang/cl/cclnalhvewt4b3vliempyodfzqlczg4crwi2xomahb6n7lu3hu72.py", line 4488, in call
return (buf1283, orig_primals_1, orig_primals_2, orig_primals_4, orig_primals_5, orig_primals_7, orig_primals_8, orig_primals_10, orig_primals_11, orig_primals_13, orig_primals_14, orig_primals_16, orig_primals_17, orig_primals_19, orig_primals_20, orig_primals_22, orig_primals_23, orig_primals_25, orig_primals_26, orig_primals_28, orig_primals_29, orig_primals_31, orig_primals_32, orig_primals_34, orig_primals_35, orig_primals_37, orig_primals_38, orig_primals_40, orig_primals_41, orig_primals_43, orig_primals_44, orig_primals_46, orig_primals_47, orig_primals_49, orig_primals_50, orig_primals_52, orig_primals_53, orig_primals_55, orig_primals_56, orig_primals_58, orig_primals_59, orig_primals_61, orig_primals_62, orig_primals_64, orig_primals_65, orig_primals_67, orig_primals_68, orig_primals_70, orig_primals_71, orig_primals_73, orig_primals_74, orig_primals_76, orig_primals_77, orig_primals_79, orig_primals_80, orig_primals_82, orig_primals_83, orig_primals_85, orig_primals_86, orig_primals_88, orig_primals_89, orig_primals_91, orig_primals_92, orig_primals_94, orig_primals_95, orig_primals_97, orig_primals_98, orig_primals_100, orig_primals_101, orig_primals_103, orig_primals_104, orig_primals_106, orig_primals_107, orig_primals_109, orig_primals_110, orig_primals_112, orig_primals_113, orig_primals_115, orig_primals_116, orig_primals_118, orig_primals_119, orig_primals_121, orig_primals_122, orig_primals_124, orig_primals_125, orig_primals_127, orig_primals_128, orig_primals_130, orig_primals_131, orig_primals_133, orig_primals_134, orig_primals_136, orig_primals_137, orig_primals_139, orig_primals_140, orig_primals_142, orig_primals_143, orig_primals_145, orig_primals_146, orig_primals_148, orig_primals_149, orig_primals_151, orig_primals_152, orig_primals_154, orig_primals_155, orig_primals_166, orig_primals_167, orig_primals_183, orig_primals_184, orig_primals_200, orig_primals_201, orig_primals_217, orig_primals_218, orig_primals_234, orig_primals_235, orig_primals_251, orig_primals_252, orig_primals_268, orig_primals_269, orig_primals_285, orig_primals_286, orig_primals_302, orig_primals_303, orig_primals_326, orig_primals_327, orig_primals_343, orig_primals_344, orig_primals_360, orig_primals_361, orig_primals_377, orig_primals_378, orig_primals_394, orig_primals_395, orig_primals_411, orig_primals_412, orig_primals_435, orig_primals_436, orig_primals_452, orig_primals_453, orig_primals_469, orig_primals_470, orig_primals_493, orig_primals_494, orig_primals_510, orig_primals_511, orig_primals_527, orig_primals_528, orig_primals_544, orig_primals_545, orig_primals_561, orig_primals_562, orig_primals_578, orig_primals_579, orig_primals_602, orig_primals_603, orig_primals_619, orig_primals_620, orig_primals_636, orig_primals_637, orig_primals_660, orig_primals_661, orig_primals_677, orig_primals_678, orig_primals_694, orig_primals_695, orig_primals_718, orig_primals_719, orig_primals_735, orig_primals_736, orig_primals_752, orig_primals_753, orig_primals_769, orig_primals_770, orig_primals_786, orig_primals_787, orig_primals_803, orig_primals_804, orig_primals_827, orig_primals_828, orig_primals_844, orig_primals_845, orig_primals_861, orig_primals_862, orig_primals_885, orig_primals_886, orig_primals_902, orig_primals_903, orig_primals_919, orig_primals_920, orig_primals_936, orig_primals_937, orig_primals_953, orig_primals_954, orig_primals_970, orig_primals_971, orig_primals_994, orig_primals_995, orig_primals_1011, orig_primals_1012, orig_primals_1028, orig_primals_1029, 
orig_primals_1052, orig_primals_1053, orig_primals_1069, orig_primals_1070, orig_primals_1086, orig_primals_1087, orig_primals_1103, orig_primals_1104, buf1, buf9, buf10, buf1289, buf19, buf26, buf27, buf34, buf35, buf1290, buf44, buf51, buf52, buf59, buf60, buf1291, buf68, buf69, buf76, buf77, buf1292, buf86, buf93, buf94, buf101, buf102, buf1293, buf111, buf118, buf119, buf126, buf127, buf1294, buf135, buf136, buf143, buf144, buf1295, buf153, buf160, buf161, buf168, buf169, buf1296, buf178, buf185, buf186, buf193, buf194, buf1297, buf203, buf210, buf211, buf218, buf219, buf1298, buf228, buf235, buf236, buf243, buf244, buf1299, buf253, buf260, buf261, buf268, buf269, buf1300, buf277, buf278, buf285, buf286, buf1301, buf295, buf302, buf303, buf310, buf311, buf1302, buf320, buf327, buf328, buf335, buf336, buf1303, buf345, buf352, buf353, buf360, buf361, buf1304, buf370, buf377, buf378, buf385, buf386, buf1305, buf395, buf402, buf403, buf410, buf411, buf1306, buf420, buf427, buf428, buf435, buf436, buf1307, buf445, buf452, buf453, buf460, buf461, buf1308, buf470, buf477, buf478, buf485, buf486, buf1309, buf494, buf495, buf502, buf503, buf1310, buf512, buf519, buf520, buf527, buf528, buf1311, buf537, buf544, buf545, buf552, buf553, buf1312, buf562, buf569, buf570, buf577, buf578, buf1313, buf587, buf594, buf595, buf602, buf603, buf1314, buf612, buf619, buf620, buf627, buf628, buf1315, buf637, buf644, buf645, buf652, buf653, buf1316, buf662, buf669, buf670, buf677, buf678, buf1317, buf687, buf694, buf695, buf702, buf703, buf1318, buf712, buf719, buf720, buf727, buf728, buf1319, buf737, buf744, buf745, buf752, buf753, buf1320, buf762, buf769, buf770, buf777, buf778, buf1321, buf786, buf787, buf794, buf795, buf1322, buf804, buf811, buf812, buf819, buf820, buf1323, buf829, buf836, buf837, buf844, buf845, buf1324, buf854, buf861, buf862, buf869, buf870, buf1325, buf879, buf886, buf887, buf894, buf895, buf1326, buf904, buf911, buf912, buf919, buf920, buf1327, buf929, buf936, buf937, buf944, buf945, buf1328, buf954, buf961, buf962, buf969, buf970, buf1329, buf979, buf986, buf987, buf994, buf995, buf1330, buf1003, buf1004, buf1011, buf1012, buf1331, buf1021, buf1028, buf1029, buf1036, buf1037, buf1332, buf1046, buf1053, buf1054, buf1061, buf1062, buf1333, buf1071, buf1078, buf1079, buf1086, buf1087, buf1334, buf1096, buf1103, buf1104, buf1111, buf1112, buf1335, buf1121, buf1128, buf1129, buf1136, buf1137, buf1336, buf1146, buf1153, buf1154, buf1161, buf1162, buf1337, buf1171, buf1178, buf1179, buf1186, buf1187, buf1338, buf1196, buf1203, buf1204, buf1211, buf1212, buf1339, buf1220, buf1221, buf1228, buf1229, buf1340, buf1238, buf1246, buf1255, buf1262, buf1268, buf1269, buf1276, buf1284, as_strided(buf1275, (1000, 1280), (1280, 1)), buf1341, buf1342, buf1343, buf1344, buf1345, buf1346, buf1347, buf1348, buf1349, buf1350, buf1351, buf1352, buf1353, buf1354, buf1355, buf1356, buf1357, buf1358, buf1359, buf1360, buf1361, buf1362, buf1363, buf1364, buf1365, buf1366, buf1367, buf1368, buf1369, buf1370, buf1371, buf1372, buf1373, buf1374, buf1375, s0, )
NameError: name 's0' is not defined
TorchDynamo optimized model failed to run because of the following error
cuda train mobilenet_v2_quantized_qat FAIL
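The NameError above shows the generated call() returning the free symbol s0 (the symbolic batch size) without ever binding it in scope. A minimal illustration of that failure shape (hypothetical function, not the generated kernel code):

def call(args):
    buf = sum(args)    # stand-in for the real kernels
    return (buf, s0)   # 's0' was never defined or unpacked from args

try:
    call([1, 2, 3])
except NameError as e:
    print(e)  # name 's0' is not defined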
Running torchbench.py mobilenet_v3_large...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=10] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/mobilenetv3.py", line 213, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/mobilenetv3.py", line 220, in forward
return self._forward_impl(x)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=10] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/mobilenetv3.py", line 213, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/mobilenetv3.py", line 220, in forward
return self._forward_impl(x)
TorchDynamo optimized model failed to run because of the following error
cuda train mobilenet_v3_large FAIL
devgpu001:632787:632787 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth0
devgpu001:632787:632787 [0] NCCL INFO NCCL_SOCKET_IFNAME set to eth0
devgpu001:632787:632787 [0] NCCL INFO Bootstrap : Using eth0:2803:6081:d0a8:baaf::1<0>
devgpu001:632787:632787 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
devgpu001:632787:632787 [0] NCCL INFO cudaDriverVersion 11040
NCCL version 2.14.3+cuda11.4
devgpu001:632787:633526 [0] NCCL INFO NCCL_IB_DISABLE set by environment to 1.
devgpu001:632787:633526 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth0
devgpu001:632787:633526 [0] NCCL INFO NET/Socket : Using [0]eth0:2803:6081:d0a8:baaf::1<0>
devgpu001:632787:633526 [0] NCCL INFO Using network Socket
devgpu001:632787:633526 [0] NCCL INFO NET/Socket : GPU Direct RDMA Disabled for HCA 0 'eth0'
devgpu001:632787:633526 [0] NCCL INFO === System : maxBw 5000.0 totalBw 0.0 ===
devgpu001:632787:633526 [0] NCCL INFO CPU/0 (1/1/2)
devgpu001:632787:633526 [0] NCCL INFO + PCI[12.0] - PCI/D000 (11f840001d9bfbe1)
devgpu001:632787:633526 [0] NCCL INFO + PCI[24.0] - PCI/F000 (11f840001d9bfbe0)
devgpu001:632787:633526 [0] NCCL INFO + PCI[24.0] - GPU/11000 (0)
devgpu001:632787:633526 [0] NCCL INFO + PCI[12.0] - NIC/30000
devgpu001:632787:633526 [0] NCCL INFO ==========================================
devgpu001:632787:633526 [0] NCCL INFO GPU/11000 :GPU/11000 (0/5000.000000/LOC) CPU/0 (3/12.000000/PHB)
devgpu001:632787:633526 [0] NCCL INFO Setting affinity for GPU 0 to ffffff,00000000,00000000,00ffffff
devgpu001:632787:633526 [0] NCCL INFO Pattern 4, crossNic 0, nChannels 16, bw 44.000000/44.000000, type LOC/PIX, sameChannels 1
devgpu001:632787:633526 [0] NCCL INFO 0 : GPU/0
[... "<n> : GPU/0" repeated for n = 1..15 ...]
devgpu001:632787:633526 [0] NCCL INFO Pattern 3, crossNic 0, nChannels 16, bw 44.000000/44.000000, type LOC/PIX, sameChannels 1
devgpu001:632787:633526 [0] NCCL INFO 0 : GPU/0
[... "<n> : GPU/0" repeated for n = 1..15 ...]
devgpu001:632787:633526 [0] NCCL INFO Pattern 3, crossNic 0, nChannels 16, bw 44.000000/44.000000, type LOC/PIX, sameChannels 1
devgpu001:632787:633526 [0] NCCL INFO 0 : GPU/0
[... "<n> : GPU/0" repeated for n = 1..15 ...]
devgpu001:632787:633526 [0] NCCL INFO Tree 0 : -1 -> 0 -> -1/-1/-1
[... identical "Tree <n> : -1 -> 0 -> -1/-1/-1" lines repeated for trees 1-15 and 16-31 ...]
devgpu001:632787:633526 [0] NCCL INFO Channel 00/32 : 0
[... "Channel <nn>/32 : 0" repeated for channels 01-31 ...]
devgpu001:632787:633526 [0] NCCL INFO Ring 00 : 0 -> 0 -> 0
[... "Ring <nn> : 0 -> 0 -> 0" repeated for rings 01-31 ...]
devgpu001:632787:633526 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1 [1] -1/-1/-1->0->-1 [2] -1/-1/-1->0->-1 [3] -1/-1/-1->0->-1 [4] -1/-1/-1->0->-1 [5] -1/-1/-1->0->-1 [6] -1/-1/-1->0->-1 [7] -1/-1/-1->0->-1 [8] -1/-1/-1->0->-1 [9] -1/-1/-1->0->-1 [10] -1/-1/-1->0->-1 [11] -1/-1/-1->0->-1 [12] -1/-1/-1->0->-1 [13] -1/-1/-1->0->-1 [14] -1/-1/-1->0->-1 [15] -1/-1/-1->0->-1 [16] -1/-1/-1->0->-1 [17] -1/-1/-1->0->-1 [18] -1/-1/-1->0->-1 [19] -1/-1/-1->0->-1 [20] -1/-1/-1->0->-1 [21] -1/-1/-1->0->-1 [22] -1/-1/-1->0->-1 [23] -1/-1/-1->0->-1 [24] -1/-1/-1->0->-1 [25] -1/-1/-1->0->-1 [26] -1/-1/-1->0->-1 [27] -1/-1/-1->0->-1 [28] -1/-1/-1->0->-1 [29] -1/-1/-1->0->-1 [30] -1/-1/-1->0->-1 [31] -1/-1/-1->0->-1
devgpu001:632787:633526 [0] NCCL INFO Connected all rings
devgpu001:632787:633526 [0] NCCL INFO Connected all trees
devgpu001:632787:633526 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer
devgpu001:632787:633534 [0] NCCL INFO New proxy send connection 0 from local rank 0, transport 2
devgpu001:632787:633526 [0] NCCL INFO Connection to proxy localRank 0 -> connection 0x7f9820002e80
devgpu001:632787:633526 [0] NCCL INFO comm 0x564b425bfa90 rank 0 nranks 1 cudaDev 0 busId 11000 - Init COMPLETE
Running torchbench.py moco...
ERROR:common:argument of type: <class 'range_iterator'>
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/parallel/distributed.py", line 1093, in forward
output = self._run_ddp_forward(*inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/parallel/distributed.py", line 1047, in _run_ddp_forward
return module_to_run(*inputs[0], **kwargs[0])
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 130, in forward
self._momentum_update_key_encoder() # update the key encoder
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 133, in <graph break in forward>
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 76, in _batch_shuffle_ddp
x_gather = concat_all_gather(x)
File "/data/users/ezyang/pytorch-tmp/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/moco/moco/builder.py", line 164, in concat_all_gather
@torch.no_grad()
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 603, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 326, in aot_dispatch_base
fw_module = make_fx(flat_fn, aot_config.decompositions)(*flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 671, in wrapped
t = dispatch_trace(wrap_key(func, args, fx_tracer), tracer=fx_tracer, concrete_args=tuple(phs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 422, in dispatch_trace
graph = tracer.trace(root, concrete_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/_symbolic_trace.py", line 739, in trace
(self.create_arg(fn(*args)),),
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 412, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/_symbolic_trace.py", line 344, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/proxy.py", line 140, in create_arg
return type(a)(self.create_arg(elem) for elem in a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/proxy.py", line 140, in <genexpr>
return type(a)(self.create_arg(elem) for elem in a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/proxy_tensor.py", line 412, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/_symbolic_trace.py", line 344, in create_arg
return super().create_arg(a)
File "/data/users/ezyang/pytorch-tmp/torch/fx/proxy.py", line 165, in create_arg
raise NotImplementedError(f"argument of type: {type(a)}")
NotImplementedError: argument of type: <class 'range_iterator'>
incomplete graph:
class <lambda>(torch.nn.Module):
def forward(self):
pass
TorchDynamo optimized model failed to run because of the following error
cuda train moco FAIL
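The moco failure is FX refusing to record a live Python iterator as a graph argument: the traced function ends up producing a `range_iterator`, and `create_arg` has no representation for it. Below is a minimal sketch of the same NotImplementedError, independent of moco and DDP; it is a hypothetical repro, not the benchmark's actual code path.

```python
# torch.fx can record tensors, containers, slices, and ranges as graph
# arguments, but not an already-started iterator such as iter(range(n)).
import torch
import torch.fx

def f(x):
    return x + 1, iter(range(3))  # the iterator is what create_arg rejects

try:
    torch.fx.symbolic_trace(f)
except NotImplementedError as e:
    print(e)  # argument of type: <class 'range_iterator'>
```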
Running torchbench.py nvidia_deeprecommender...
cuda train nvidia_deeprecommender PASS
Running torchbench.py pytorch_CycleGAN_and_pix2pix...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=102] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_model': <class 'torch.nn.modules.container.Sequential'>, 'self_model_2': <class 'torch.nn.modules.instancenorm.InstanceNorm2d'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_CycleGAN_and_pix2pix/models/networks.py", line 372, in forward
return self.model(input)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=102] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_model': <class 'torch.nn.modules.container.Sequential'>, 'self_model_2': <class 'torch.nn.modules.instancenorm.InstanceNorm2d'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_CycleGAN_and_pix2pix/models/networks.py", line 372, in forward
return self.model(input)
--dataroot /data/users/ezyang/benchmark/torchbenchmark/data/.data/pytorch_CycleGAN_and_pix2pix_inputs/datasets/horse2zebra --name horse2zebra --model cycle_gan --display_id 0 --n_epochs 3 --n_epochs_decay 3 --gpu_ids 0 --checkpoints_dir /data/users/ezyang/benchmark/torchbenchmark/models/pytorch_CycleGAN_and_pix2pix/.data/checkpoints
TorchDynamo optimized model failed to run because of the following error
cuda train pytorch_CycleGAN_and_pix2pix FAIL
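The repeated "'int' object has no attribute 'size'" failures all bottom out in the same helper: inductor's `static_sizes_strides` assumes each placeholder's example value is a Tensor, but under dynamic shapes the backward graph also receives the traced `sym_size` values as plain-int placeholders. A sketch of the mismatch follows; the helper body is abbreviated to the failing lines shown in the traceback, not the full implementation.

```python
# Abbreviated form of static_sizes_strides from torch/_inductor/graph.py:
# it calls .size()/.stride() on the example value, which only Tensors have.
import sympy
import torch

def static_sizes_strides(ex):
    size = [sympy.Integer(i) for i in ex.size()]
    stride = [sympy.Integer(i) for i in ex.stride()]
    return size, stride

print(static_sizes_strides(torch.empty(2, 3)))  # OK for a Tensor placeholder
try:
    static_sizes_strides(4)  # a sym_size placeholder arrives as a plain int
except AttributeError as e:
    print(e)  # 'int' object has no attribute 'size'
```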
Running torchbench.py pytorch_stargan...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size_4 : [#users=4] = placeholder[target=sym_size_4]
Original traceback:
Module stack: {'self_main': <class 'torch.nn.modules.container.Sequential'>, 'self_main_1': <class 'torch.nn.modules.instancenorm.InstanceNorm2d'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_stargan/model.py", line 64, in forward
return self.main(x)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size_4 : [#users=4] = placeholder[target=sym_size_4]
Original traceback:
Module stack: {'self_main': <class 'torch.nn.modules.container.Sequential'>, 'self_main_1': <class 'torch.nn.modules.instancenorm.InstanceNorm2d'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_stargan/model.py", line 64, in forward
return self.main(x)
TorchDynamo optimized model failed to run because of the following error
cuda train pytorch_stargan FAIL
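Note: pytorch_stargan above, and pytorch_struct, resnet18, resnet50, and resnext50_32x4d below, fail the same way during the backward compile; the static_sizes_strides sketch under pytorch_CycleGAN_and_pix2pix covers all of them.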
Running torchbench.py pytorch_struct...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_struct/networks/NeuralCFG.py", line 49, in terms
torch.einsum("vh,th->tv", self.word_emb, self.mlp1(self.term_emb))
| File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_struct/networks/NeuralCFG.py", line 77, in forward
return terms(input), rules(batch), roots(batch)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_struct/networks/NeuralCFG.py", line 49, in terms
torch.einsum("vh,th->tv", self.word_emb, self.mlp1(self.term_emb))
| File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_struct/networks/NeuralCFG.py", line 77, in forward
return terms(input), rules(batch), roots(batch)
TorchDynamo optimized model failed to run because of the following error
cuda train pytorch_struct FAIL
Running torchbench.py pytorch_unet...
ERROR:common:RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 80.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 80.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 80.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_30,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_up1': <class 'torchbenchmark.models.pytorch_unet.pytorch_unet.unet.unet_parts.Up'>, 'self_up1_up': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_parts.py", line 57, in forward
x1 = self.up(x1)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_model.py", line 31, in forward
x = self.up1(x5, x4)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 239, in call_function
out = lowerings[target](*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 204, in wrapped
return decomp_fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 1194, in arange
return fallback_arange(
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/lowering.py", line 893, in handler
result = ir.FallbackKernel.create(kernel, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/ir.py", line 2890, in create
example_output = kernel(
File "/data/users/ezyang/pytorch-tmp/torch/_ops.py", line 446, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 80.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 80.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_model.py", line 25, in forward
def forward(self, x):
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 880, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 600, in create_aot_dispatcher_function
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 459, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(fw_module, deduped_flat_args)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 351, in fw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 242, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: RuntimeError: Overloaded torch operator invoked from Python failed to match any schema:
aten::arange() expected at most 5 argument(s) but received 7 argument(s). Declaration: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() expected at most 6 argument(s) but received 7 argument(s). Declaration: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten::arange() Expected a value of type 'number' for argument 'end' but instead found type 'Float'.
Position: 1
Value: 80.0000000000000
Declaration: aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
Cast error details: Cannot cast 80.0000000000000 to number
aten::arange() expected at most 4 argument(s) but received 7 argument(s). Declaration: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
aten::arange() expected at most 2 argument(s) but received 7 argument(s). Declaration: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
target: aten.arange.default
args[0]: 80.0000000000000
kwargs: {'dtype': torch.float32, 'device': device(type='cuda', index=0), 'pin_memory': False}
While executing %arange : [#users=1] = call_function[target=torch.ops.aten.arange.default](args = (%mul_30,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
Original traceback:
Module stack: {'self_up1': <class 'torchbenchmark.models.pytorch_unet.pytorch_unet.unet.unet_parts.Up'>, 'self_up1_up': <class 'torch.nn.modules.upsampling.Upsample'>}
File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_parts.py", line 57, in forward
x1 = self.up(x1)
| File "/data/users/ezyang/benchmark/torchbenchmark/models/pytorch_unet/pytorch_unet/unet/unet_model.py", line 31, in forward
x = self.up1(x5, x4)
TorchDynamo optimized model failed to run because of the following error
cuda train pytorch_unet FAIL
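The pytorch_unet failure is the inductor fallback path handing `aten.arange` an `end` value that is still a `sympy.Float` rather than a Python number, so schema matching rejects every overload. Below is a hedged repro of just the cast failure; the kwargs are trimmed relative to the log and it runs on CPU.

```python
# A sympy.Float is not a torch Scalar, so overload resolution fails with
# "Cannot cast 80.0000000000000 to number"; a real Python float works.
import sympy
import torch

end = sympy.Float(80.0)  # stand-in for what %mul_30 evaluates to
try:
    torch.ops.aten.arange.default(end, dtype=torch.float32, pin_memory=False)
except (RuntimeError, TypeError) as e:
    print(e)

torch.ops.aten.arange.default(float(end), dtype=torch.float32)  # succeeds
```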
Running torchbench.py resnet18...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 279, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 279, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
TorchDynamo optimized model failed to run because of the following error
cuda train resnet18 FAIL
Running torchbench.py resnet50...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 279, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 279, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
TorchDynamo optimized model failed to run because of the following error
cuda train resnet50 FAIL
Running torchbench.py resnet50_quantized_qat...
WARNING:common:fp64 golden ref were not generated for resnet50_quantized_qat
[2022-10-30 21:35:27,833] torch._inductor.ir: [WARNING] Using FallbackKernel: aten._fused_moving_avg_obs_fq_helper_functional
[... same FallbackKernel warning repeated 128 times in total, 21:35:27,833 through 21:35:28,742 ...]
ERROR:common:name 's0' is not defined
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 335, in <graph break in forward_and_backward_pass>
pred = mod(*cloned_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/graph_module.py", line 660, in call_wrapped
return self._wrapped_call(self, *args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/fx/graph_module.py", line 279, in __call__
raise e
File "/data/users/ezyang/pytorch-tmp/torch/fx/graph_module.py", line 269, in __call__
return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
File "/data/users/ezyang/pytorch-tmp/torch/nn/modules/module.py", line 1423, in _call_impl
return forward_call(*input, **kwargs)
File "<eval_with_key>.5", line 4, in forward
def forward(self, x : torch.Tensor) -> torch.Tensor:
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 893, in forward
return compiled_f(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 885, in new_func
return compiled_fn(args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 271, in g
return f(*args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 513, in compiled_function
return CompiledFunction.apply(*remove_dupe_args(args))
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 471, in forward
fw_outs = call_func_with_args(
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 296, in call_func_with_args
out = normalize_as_list(f(args))
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 186, in run
return model(new_inputs)
File "/tmp/torchinductor_ezyang/pc/cpcvl67kxf23dyi4iyfxqfpdajqgxrwhujrc6hybabfsmffl3oyq.py", line 3492, in call
return (buf1070, orig_primals_1, orig_primals_2, orig_primals_4, orig_primals_5, orig_primals_7, orig_primals_8, orig_primals_10, orig_primals_11, orig_primals_13, orig_primals_14, orig_primals_16, orig_primals_17, orig_primals_19, orig_primals_20, orig_primals_22, orig_primals_23, orig_primals_25, orig_primals_26, orig_primals_28, orig_primals_29, orig_primals_31, orig_primals_32, orig_primals_34, orig_primals_35, orig_primals_37, orig_primals_38, orig_primals_40, orig_primals_41, orig_primals_43, orig_primals_44, orig_primals_46, orig_primals_47, orig_primals_49, orig_primals_50, orig_primals_52, orig_primals_53, orig_primals_55, orig_primals_56, orig_primals_58, orig_primals_59, orig_primals_61, orig_primals_62, orig_primals_64, orig_primals_65, orig_primals_67, orig_primals_68, orig_primals_70, orig_primals_71, orig_primals_73, orig_primals_74, orig_primals_76, orig_primals_77, orig_primals_79, orig_primals_80, orig_primals_82, orig_primals_83, orig_primals_85, orig_primals_86, orig_primals_88, orig_primals_89, orig_primals_91, orig_primals_92, orig_primals_94, orig_primals_95, orig_primals_97, orig_primals_98, orig_primals_100, orig_primals_101, orig_primals_103, orig_primals_104, orig_primals_106, orig_primals_107, orig_primals_109, orig_primals_110, orig_primals_112, orig_primals_113, orig_primals_115, orig_primals_116, orig_primals_118, orig_primals_119, orig_primals_121, orig_primals_122, orig_primals_124, orig_primals_125, orig_primals_127, orig_primals_128, orig_primals_130, orig_primals_131, orig_primals_133, orig_primals_134, orig_primals_136, orig_primals_137, orig_primals_139, orig_primals_140, orig_primals_142, orig_primals_143, orig_primals_145, orig_primals_146, orig_primals_148, orig_primals_149, orig_primals_151, orig_primals_152, orig_primals_154, orig_primals_155, orig_primals_157, orig_primals_158, orig_primals_169, orig_primals_170, orig_primals_186, orig_primals_187, orig_primals_203, orig_primals_204, orig_primals_220, orig_primals_221, orig_primals_237, orig_primals_238, orig_primals_261, orig_primals_262, orig_primals_278, orig_primals_279, orig_primals_295, orig_primals_296, orig_primals_319, orig_primals_320, orig_primals_336, orig_primals_337, orig_primals_353, orig_primals_354, orig_primals_377, orig_primals_378, orig_primals_394, orig_primals_395, orig_primals_411, orig_primals_412, orig_primals_428, orig_primals_429, orig_primals_452, orig_primals_453, orig_primals_469, orig_primals_470, orig_primals_486, orig_primals_487, orig_primals_510, orig_primals_511, orig_primals_527, orig_primals_528, orig_primals_544, orig_primals_545, orig_primals_568, orig_primals_569, orig_primals_585, orig_primals_586, orig_primals_602, orig_primals_603, orig_primals_626, orig_primals_627, orig_primals_643, orig_primals_644, orig_primals_660, orig_primals_661, orig_primals_677, orig_primals_678, orig_primals_701, orig_primals_702, orig_primals_718, orig_primals_719, orig_primals_735, orig_primals_736, orig_primals_759, orig_primals_760, orig_primals_776, orig_primals_777, orig_primals_793, orig_primals_794, orig_primals_817, orig_primals_818, orig_primals_834, orig_primals_835, orig_primals_851, orig_primals_852, orig_primals_875, orig_primals_876, orig_primals_892, orig_primals_893, orig_primals_909, orig_primals_910, orig_primals_933, orig_primals_934, orig_primals_950, orig_primals_951, orig_primals_967, orig_primals_968, orig_primals_991, orig_primals_992, orig_primals_1008, orig_primals_1009, orig_primals_1025, orig_primals_1026, orig_primals_1042, orig_primals_1043, 
orig_primals_1066, orig_primals_1067, orig_primals_1083, orig_primals_1084, orig_primals_1100, orig_primals_1101, orig_primals_1124, orig_primals_1125, orig_primals_1141, orig_primals_1142, orig_primals_1158, orig_primals_1159, buf1, buf9, buf10, buf1076, buf18, buf19, buf25, buf27, buf28, buf35, buf36, buf1077, buf44, buf45, buf52, buf53, buf1078, buf61, buf62, buf69, buf70, buf1079, buf79, buf86, buf87, buf1080, buf96, buf103, buf104, buf111, buf112, buf1081, buf120, buf121, buf128, buf129, buf1082, buf137, buf138, buf145, buf146, buf1083, buf155, buf162, buf163, buf170, buf171, buf1084, buf179, buf180, buf187, buf188, buf1085, buf196, buf197, buf204, buf205, buf1086, buf214, buf221, buf222, buf229, buf230, buf1087, buf238, buf239, buf246, buf247, buf1088, buf255, buf256, buf263, buf264, buf1089, buf273, buf280, buf281, buf1090, buf290, buf297, buf298, buf305, buf306, buf1091, buf314, buf315, buf322, buf323, buf1092, buf331, buf332, buf339, buf340, buf1093, buf349, buf356, buf357, buf364, buf365, buf1094, buf373, buf374, buf381, buf382, buf1095, buf390, buf391, buf398, buf399, buf1096, buf408, buf415, buf416, buf423, buf424, buf1097, buf432, buf433, buf440, buf441, buf1098, buf449, buf450, buf457, buf458, buf1099, buf467, buf474, buf475, buf482, buf483, buf1100, buf491, buf492, buf499, buf500, buf1101, buf508, buf509, buf516, buf517, buf1102, buf526, buf533, buf534, buf1103, buf543, buf550, buf551, buf558, buf559, buf1104, buf567, buf568, buf575, buf576, buf1105, buf584, buf585, buf592, buf593, buf1106, buf602, buf609, buf610, buf617, buf618, buf1107, buf626, buf627, buf634, buf635, buf1108, buf643, buf644, buf651, buf652, buf1109, buf661, buf668, buf669, buf676, buf677, buf1110, buf685, buf686, buf693, buf694, buf1111, buf702, buf703, buf710, buf711, buf1112, buf720, buf727, buf728, buf735, buf736, buf1113, buf744, buf745, buf752, buf753, buf1114, buf761, buf762, buf769, buf770, buf1115, buf779, buf786, buf787, buf794, buf795, buf1116, buf803, buf804, buf811, buf812, buf1117, buf820, buf821, buf828, buf829, buf1118, buf838, buf845, buf846, buf853, buf854, buf1119, buf862, buf863, buf870, buf871, buf1120, buf879, buf880, buf887, buf888, buf1121, buf897, buf904, buf905, buf1122, buf914, buf921, buf922, buf929, buf930, buf1123, buf938, buf939, buf946, buf947, buf1124, buf955, buf956, buf963, buf964, buf1125, buf973, buf980, buf981, buf988, buf989, buf1126, buf997, buf998, buf1005, buf1006, buf1127, buf1014, buf1015, buf1022, buf1023, buf1128, buf1032, buf1040, buf1049, buf1055, buf1056, buf1063, buf1071, as_strided(buf1062, (1000, 2048), (2048, 1)), buf1129, buf1130, buf1131, buf1132, buf1133, buf1134, buf1135, buf1136, buf1137, buf1138, buf1139, buf1140, buf1141, buf1142, buf1143, buf1144, buf1145, buf1146, buf1147, buf1148, buf1149, buf1150, buf1151, buf1152, buf1153, buf1154, buf1155, buf1156, buf1157, buf1158, buf1159, buf1160, buf1161, buf1162, buf1163, buf1164, buf1165, buf1166, buf1167, buf1168, buf1169, buf1170, buf1171, buf1172, buf1173, buf1174, buf1175, buf1176, buf1177, s0, )
NameError: name 's0' is not defined
TorchDynamo optimized model failed to run because of following error
cuda train resnet50_quantized_qat FAIL
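[Annotation: the NameError above is raised inside the Inductor-generated wrapper, which returns the symbolic size s0 in its output tuple without ever binding it in the function body. A minimal, self-contained sketch of the failure mode and the expected fix pattern; the function names and args layout here are illustrative assumptions, not the actual codegen:]

def call(args):
    (buf,) = args
    return (buf, s0)  # NameError: name 's0' is not defined, as in the log

def call_fixed(args):
    # Assumption: dynamic-shape wrappers thread symbolic sizes through args,
    # so the fix is to bind s0 from the inputs before returning it.
    s0, buf = args
    return (buf, s0)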
Running torchbench.py resnext50_32x4d...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 279, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=2] = placeholder[target=sym_size]
Original traceback:
Module stack: {}
File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 279, in _forward_impl
x = torch.flatten(x, 1)
| File "/data/users/ezyang/vision/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
TorchDynamo optimized model failed to run because of following error
cuda train resnext50_32x4d FAIL
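[Annotation: here the backward graph receives a sym_size placeholder whose example value is a plain Python int, but graph.py line 53 unconditionally calls ex.size(), which only exists on tensors. A sketch of the guard the traceback suggests is missing; placeholder_sizes_strides is a hypothetical helper for illustration, and the real torch._inductor.graph API may differ:]

import sympy
import torch

def placeholder_sizes_strides(example):
    # Assumption: sym_size placeholders arrive as plain ints under
    # TORCHDYNAMO_DYNAMIC_SHAPES=1, so there is no tensor to inspect.
    if not isinstance(example, torch.Tensor):
        return sympy.Integer(example), None
    sizes = [sympy.Integer(i) for i in example.size()]
    strides = [sympy.Integer(i) for i in example.stride()]
    return sizes, strides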
Running torchbench.py shufflenet_v2_x1_0...
ERROR:common:'int' object has no attribute 'size'
While executing %sym_size : [#users=33] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_stage2': <class 'torch.nn.modules.container.Sequential'>, 'self_stage2_0': <class 'torchvision.models.shufflenetv2.InvertedResidual'>}
File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 33, in channel_shuffle
x = x.view(batchsize, groups, channels_per_group, height, width)
| File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 99, in forward
out = channel_shuffle(out, 2)
| File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 157, in _forward_impl
x = self.stage2(x)
| File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 166, in forward
return self._forward_impl(x)
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 1041, in check_accuracy
new_result = optimized_model_iter_fn(model, example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/common.py", line 945, in run_n_iterations
self.model_iter_fn(mod, inputs, collect_outputs=False)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 332, in forward_and_backward_pass
cloned_inputs = clone_inputs(inputs)
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 337, in <graph break in forward_and_backward_pass>
self.grad_scaler.scale(loss).backward()
File "/data/users/ezyang/pytorch-tmp/torch/_tensor.py", line 488, in backward
torch.autograd.backward(
File "/data/users/ezyang/pytorch-tmp/torch/autograd/__init__.py", line 197, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/data/users/ezyang/pytorch-tmp/torch/autograd/function.py", line 270, in apply
return user_fn(self, *args)
File "/data/users/ezyang/pytorch-tmp/functorch/_src/aot_autograd.py", line 499, in backward
CompiledFunction.compiled_bw = aot_config.bw_compiler(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/optimizations/backends.py", line 555, in _wrapped_bw_compiler
return disable(bw_compiler(*args, **kwargs))
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 362, in bw_compiler
return compile_fx_inner(
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/debug_utils.py", line 466, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/debug.py", line 177, in inner
return fn(*args, **kwargs)
File "/home/ezyang/local/pytorch-tmp-env/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/compile_fx.py", line 122, in compile_fx_inner
graph.run(*example_inputs)
File "/data/users/ezyang/pytorch-tmp/torch/_dynamo/utils.py", line 87, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 129, in run
return super().run(*args)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 299, in run_node
result = super().run_node(n)
File "/data/users/ezyang/pytorch-tmp/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 199, in placeholder
sizes, strides = self.static_sizes_strides(example)
File "/data/users/ezyang/pytorch-tmp/torch/_inductor/graph.py", line 53, in static_sizes_strides
size = [sympy.Integer(i) for i in ex.size()]
AttributeError: 'int' object has no attribute 'size'
While executing %sym_size : [#users=33] = placeholder[target=sym_size]
Original traceback:
Module stack: {'self_stage2': <class 'torch.nn.modules.container.Sequential'>, 'self_stage2_0': <class 'torchvision.models.shufflenetv2.InvertedResidual'>}
File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 33, in channel_shuffle
x = x.view(batchsize, groups, channels_per_group, height, width)
| File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 99, in forward
out = channel_shuffle(out, 2)
| File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 157, in _forward_impl
x = self.stage2(x)
| File "/data/users/ezyang/vision/torchvision/models/shufflenetv2.py", line 166, in forward
return self._forward_impl(x)
TorchDynamo optimized model failed to run because of following error
cuda train shufflenet_v2_x1_0 FAIL
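[Annotation: shufflenet_v2_x1_0 fails on the same missing guard as resnext50_32x4d above; the only difference is that its sym_size placeholder feeds 33 users (the channel_shuffle view) rather than 2.]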
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/benchmarks/dynamo/torchbench.py", line 11, in <module>
import torch
File "/data/users/ezyang/pytorch-tmp/torch/__init__.py", line 1027, in <module>
from . import _meta_registrations
File "/data/users/ezyang/pytorch-tmp/torch/_meta_registrations.py", line 7, in <module>
from torch._decomp import _add_op_to_registry, global_decomposition_table, meta_table
File "/data/users/ezyang/pytorch-tmp/torch/_decomp/__init__.py", line 168, in <module>
import torch._decomp.decompositions
File "/data/users/ezyang/pytorch-tmp/torch/_decomp/decompositions.py", line 16, in <module>
from torch.fx.experimental.symbolic_shapes import (
File "/data/users/ezyang/pytorch-tmp/torch/fx/experimental/symbolic_shapes.py", line 12, in <module>
from torch._subclasses.meta_utils import MetaConverter
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/__init__.py", line 3, in <module>
from torch._subclasses.fake_tensor import (
File "/data/users/ezyang/pytorch-tmp/torch/_subclasses/fake_tensor.py", line 893
<<<<<<< HEAD
^
SyntaxError: invalid syntax
cuda train soft_actor_critic FAIL
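[Annotation: every failure from here on is the same import-time SyntaxError: an unresolved git merge-conflict marker ("<<<<<<< HEAD") left at line 893 of torch/_subclasses/fake_tensor.py. A minimal sketch of a scanner for such markers; the search root "torch" is an assumption about the working directory:]

import pathlib

# Lines beginning with these markers indicate an unresolved merge conflict.
# Note: "=======" alone can false-positive on RST-style heading underlines.
MARKERS = ("<<<<<<< ", "=======", ">>>>>>> ")

for path in pathlib.Path("torch").rglob("*.py"):
    for lineno, line in enumerate(path.read_text(errors="replace").splitlines(), 1):
        if line.startswith(MARKERS):
            print(f"{path}:{lineno}: {line.rstrip()}")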
[The SyntaxError traceback above repeats verbatim for each remaining torchbench model; only the final status line differs:]
cuda train speech_transformer FAIL
cuda train squeezenet1_1 FAIL
cuda train tacotron2 FAIL
cuda train timm_efficientdet FAIL
cuda train timm_efficientnet FAIL
cuda train timm_regnet FAIL
cuda train timm_resnest FAIL
cuda train timm_vision_transformer FAIL
cuda train timm_vision_transformer_large FAIL
cuda train timm_vovnet FAIL
cuda train tts_angular FAIL
cuda train vgg16 FAIL
cuda train vision_maskrcnn FAIL
cuda train yolov3 FAIL
[The huggingface.py and timm_models.py sweeps then abort with the same SyntaxError at "import torch" (huggingface.py line 10, timm_models.py line 11) before running any model.]