@davidberard98
Last active October 31, 2023 21:23
/data/users/dberard/scripts/oncall/112494.py:6: UserWarning: An output with one or more elements was resized since it had shape [10, 9, 8], which does not match the required output shape [1, 9, 8]. This behavior is deprecated, and in a future PyTorch release outputs will not be resized unless they have zero elements. You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0). (Triggered internally at ../aten/src/ATen/native/Resize.cpp:28.)
x = torch.var(correction=4, dim=0, input=x, keepdim=True, out=torch.rand_like(x))
/data/users/dberard/pytorch/torch/_prims_common/wrappers.py:159: UserWarning: An output with one or more elements was resized since it had shape torch.Size([s0, s1, s2]) which does not match the required output shape {str(shape)}. This behavior is deprecated, and in a future PyTorch release outputs will not be resized unless they have zero elements. You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0).
warnings.warn(msg)
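For reference, a minimal repro consistent with the warnings above and the traceback below. This is a sketch reconstructed from the frames in the log: the two statements inside fn and the compile call are verbatim from the traceback, the input shape (10, 9, 8) is taken from the first warning, and fn's signature and return value are assumptions.

import torch

def fn(x, device):
    # out= has the unreduced shape, so var() must resize it; that implicit
    # resize is deprecated in eager and crashes under torch.compile(dynamic=True).
    x = torch.var(correction=4, dim=0, input=x, keepdim=True, out=torch.rand_like(x))
    return x

cuda_tensor = torch.rand(10, 9, 8, device='cuda')  # shape assumed from the first warning
op_info = torch.compile(fn, dynamic=True)(cuda_tensor, 'cuda')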
Traceback (most recent call last):
File "/data/users/dberard/scripts/oncall/112494.py", line 11, in <module>
op_info = torch.compile(fn, dynamic=True)(cuda_tensor, 'cuda')
File "/data/users/dberard/pytorch/torch/_dynamo/eval_frame.py", line 411, in _fn
return fn(*args, **kwargs)
File "/data/users/dberard/pytorch/torch/_dynamo/eval_frame.py", line 559, in catch_errors
return callback(frame, cache_entry, hooks, frame_state)
File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 687, in _convert_frame
result = inner_convert(frame, cache_entry, hooks, frame_state)
File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 148, in _fn
return fn(*args, **kwargs)
File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 406, in _convert_frame_assert
return _compile(
File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 614, in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 221, in time_wrapper
r = func(*args, **kwargs)
File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 531, in compile_inner
out_code = transform_code_object(code, transform)
File "/data/users/dberard/pytorch/torch/_dynamo/bytecode_transformation.py", line 1028, in transform_code_object
transformations(instructions, code_options)
File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 501, in transform
tracer.run()
File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 2119, in run
super().run()
File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 752, in run
and self.step()
File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 715, in step
getattr(self, inst.opname)(inst)
File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 405, in wrapper
return inner_fn(self, inst)
File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 1200, in CALL_FUNCTION_KW
self.call_function(fn, args, kwargs)
File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 586, in call_function
self.push(fn.call_function(self, args, kwargs))
File "/data/users/dberard/pytorch/torch/_dynamo/variables/torch.py", line 722, in call_function
tensor_variable = wrap_fx_proxy(
File "/data/users/dberard/pytorch/torch/_dynamo/variables/builder.py", line 1341, in wrap_fx_proxy
return wrap_fx_proxy_cls(
File "/data/users/dberard/pytorch/torch/_dynamo/variables/builder.py", line 1431, in wrap_fx_proxy_cls
example_value = get_fake_value(proxy.node, tx, allow_non_graph_fake=True)
File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1483, in get_fake_value
raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1444, in get_fake_value
ret_val = wrap_fake_exception(
File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 993, in wrap_fake_exception
return fn()
File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1445, in <lambda>
lambda: run_node(tx.output, node, args, kwargs, nnmodule)
File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1548, in run_node
raise RuntimeError(fn_str + str(e)).with_traceback(e.__traceback__) from e
File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1527, in run_node
return node.target(*args, **kwargs)
File "/data/users/dberard/pytorch/torch/utils/_stats.py", line 20, in wrapper
return fn(*args, **kwargs)
File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1378, in __torch_dispatch__
return self.dispatch(func, types, args, kwargs)
File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1584, in dispatch
return decomposition_table[func](*args, **kwargs)
File "/data/users/dberard/pytorch/torch/_prims_common/wrappers.py", line 268, in _fn
_maybe_resize_out(out, result.shape)
File "/data/users/dberard/pytorch/torch/_prims_common/wrappers.py", line 166, in _maybe_resize_out
return out.resize_(shape)
File "/data/users/dberard/pytorch/torch/utils/_stats.py", line 20, in wrapper
return fn(*args, **kwargs)
File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1378, in __torch_dispatch__
return self.dispatch(func, types, args, kwargs)
File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1676, in dispatch
r = func(*args, **kwargs)
File "/data/users/dberard/pytorch/torch/_ops.py", line 513, in __call__
return self._op(*args, **kwargs or {})
torch._dynamo.exc.TorchRuntimeError: Failed running call_function <built-in method var of type object at 0x7f8dc89eaf40>(*(), **{'correction': 4, 'dim': 0, 'input': FakeTensor(..., device='cuda:0', size=(s0, s1, s2)), 'keepdim': True, 'out': FakeTensor(..., device='cuda:0', size=(s0, s1, s2))}):
Expected !size_bytes_is_heap_allocated_ to be true, but got false. (Could this error message be improved? If so, please report an enhancement request to PyTorch.)
Exception raised from nbytes at ../c10/core/StorageImpl.h:91 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x95 (0x7f8dc9716475 in /data/users/dberard/pytorch/torch/lib/libc10.so)
frame #1: c10::detail::torchCheckFail(char const*, char const*, unsigned int, char const*) + 0x68 (0x7f8dc96cbaac in /data/users/dberard/pytorch/torch/lib/libc10.so)
frame #2: <unknown function> + 0x16f395f (0x7f8dbe2f395f in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
frame #3: <unknown function> + 0x2a55447 (0x7f8dbf655447 in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
frame #4: <unknown function> + 0x69f89b (0x7f8dc809f89b in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #5: <unknown function> + 0x69c995 (0x7f8dc809c995 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #6: <unknown function> + 0x49c45d4 (0x7f8dc15c45d4 in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
frame #7: torch::jit::invokeOperatorFromPython(std::vector<std::shared_ptr<torch::jit::Operator>, std::allocator<std::shared_ptr<torch::jit::Operator> > > const&, pybind11::args, pybind11::kwargs const&, c10::optional<c10::DispatchKey>) + 0x334 (0x7f8dc82a4de4 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #8: torch::jit::_get_operation_for_overload_or_packet(std::vector<std::shared_ptr<torch::jit::Operator>, std::allocator<std::shared_ptr<torch::jit::Operator> > > const&, c10::Symbol, pybind11::args, pybind11::kwargs const&, bool, c10::optional<c10::DispatchKey>) + 0x69f (0x7f8dc82a562f in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #9: <unknown function> + 0x78f9e9 (0x7f8dc818f9e9 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #10: <unknown function> + 0x3b8f57 (0x7f8dc7db8f57 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #11: python() [0x507507]
<omitting python frames>
frame #14: python() [0x4e67ea]
frame #17: python() [0x5cb013]
frame #20: python() [0x4e67ea]
frame #23: python() [0x4e67ea]
frame #26: python() [0x4e67ea]
frame #28: python() [0x5052b1]
frame #29: python() [0x595396]
frame #31: torch::handle_torch_function_no_python_arg_parser(c10::ArrayRef<_object*>, _object*, _object*, char const*, _object*, char const*, torch::TorchFunctionName) + 0x423 (0x7f8dc84ddb13 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #32: <unknown function> + 0x69aea3 (0x7f8dc809aea3 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #33: <unknown function> + 0x134c8c9 (0x7f8dbdf4c8c9 in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
frame #34: <unknown function> + 0x69f89b (0x7f8dc809f89b in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #35: <unknown function> + 0x69c995 (0x7f8dc809c995 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #36: at::_ops::resize_::call(at::Tensor const&, c10::ArrayRef<c10::SymInt>, c10::optional<c10::MemoryFormat>) + 0x34c (0x7f8dbee6a71c in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
frame #37: <unknown function> + 0x47b0de (0x7f8dc7e7b0de in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #38: python() [0x4f9956]
frame #40: python() [0x4f7ec3]
frame #42: python() [0x4e67ea]
frame #46: python() [0x4e67ea]
frame #49: python() [0x4e67ea]
frame #52: python() [0x4e67ea]
frame #54: python() [0x5052b1]
frame #55: python() [0x595396]
frame #57: torch::handle_torch_function_no_python_arg_parser(c10::ArrayRef<_object*>, _object*, _object*, char const*, _object*, char const*, torch::TorchFunctionName) + 0x423 (0x7f8dc84ddb13 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #58: <unknown function> + 0x69aea3 (0x7f8dc809aea3 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #59: <unknown function> + 0x134c8c9 (0x7f8dbdf4c8c9 in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
frame #60: <unknown function> + 0x69f89b (0x7f8dc809f89b in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #61: <unknown function> + 0x69c995 (0x7f8dc809c995 in /data/users/dberard/pytorch/torch/lib/libtorch_python.so)
frame #62: at::_ops::var_correction_out::redispatch(c10::DispatchKeySet, at::Tensor const&, c10::OptionalArrayRef<long>, c10::optional<c10::Scalar> const&, bool, at::Tensor&) + 0x1a8 (0x7f8dbeb80c48 in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
frame #63: <unknown function> + 0x409e016 (0x7f8dc0c9e016 in /data/users/dberard/pytorch/torch/lib/libtorch_cpu.so)
from user code:
File "/data/users/dberard/scripts/oncall/112494.py", line 6, in fn
x = torch.var(correction=4, dim=0, input=x, keepdim=True, out=torch.rand_like(x))
Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
You can suppress this exception and fall back to eager by setting:
import torch._dynamo
torch._dynamo.config.suppress_errors = True
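For context: the check in StorageImpl.h's nbytes suggests resize_ was called on a FakeTensor whose storage size is symbolic (a SymInt under dynamic=True), which the concrete nbytes() accessor rejects; the resize itself comes from the deprecated implicit resizing of the wrongly-shaped out= tensor. A hedged workaround sketch, not from the gist: pass an out tensor that already has the reduced shape, or drop out= entirely.

import torch

def fn(x, device):
    # Allocate out with the keepdim-reduced shape so var() never resizes it.
    out = torch.empty((1,) + x.shape[1:], device=device)
    return torch.var(x, dim=0, correction=4, keepdim=True, out=out)
    # Equivalently, skip out= and let var() allocate the result itself:
    # return torch.var(x, dim=0, correction=4, keepdim=True)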