@davidberard98
Last active October 31, 2023 21:31
/data/users/dberard/scripts/oncall/112502.py:7: UserWarning: An output with one or more elements was resized since it had shape [9, 10], which does not match the required output shape [9]. This behavior is deprecated, and in a future PyTorch release outputs will not be resized unless they have zero elements. You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0). (Triggered internally at ../aten/src/ATen/native/Resize.cpp:28.)
  x = torch.diag(input=x, diagonal=0,out=torch.rand([9, 10], dtype=torch.float32).to('cpu'))
build succeeded
/data/users/dberard/pytorch/torch/_prims_common/wrappers.py:159: UserWarning: An output with one or more elements was resized since it had shape torch.Size([9, 10]) which does not match the required output shape {str(shape)}. This behavior is deprecated, and in a future PyTorch release outputs will not be resized unless they have zero elements. You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0).
  warnings.warn(msg)
Traceback (most recent call last):
  File "/data/users/dberard/scripts/oncall/112502.py", line 13, in <module>
    op_info = torch.compile(forward, mode='max-autotune',fullgraph=False,dynamic=True)(cuda_tensor, 'cuda')
  File "/data/users/dberard/pytorch/torch/_dynamo/eval_frame.py", line 411, in _fn
    return fn(*args, **kwargs)
  File "/data/users/dberard/pytorch/torch/_dynamo/eval_frame.py", line 559, in catch_errors
    return callback(frame, cache_entry, hooks, frame_state)
  File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 687, in _convert_frame
    result = inner_convert(frame, cache_entry, hooks, frame_state)
  File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 148, in _fn
    return fn(*args, **kwargs)
  File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 406, in _convert_frame_assert
    return _compile(
  File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 614, in _compile
    guarded_code = compile_inner(code, one_graph, hooks, transform)
  File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 221, in time_wrapper
    r = func(*args, **kwargs)
  File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 531, in compile_inner
    out_code = transform_code_object(code, transform)
  File "/data/users/dberard/pytorch/torch/_dynamo/bytecode_transformation.py", line 1028, in transform_code_object
    transformations(instructions, code_options)
  File "/data/users/dberard/pytorch/torch/_dynamo/convert_frame.py", line 501, in transform
    tracer.run()
  File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 2119, in run
    super().run()
  File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 752, in run
    and self.step()
  File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 715, in step
    getattr(self, inst.opname)(inst)
  File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 405, in wrapper
    return inner_fn(self, inst)
  File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 1200, in CALL_FUNCTION_KW
    self.call_function(fn, args, kwargs)
  File "/data/users/dberard/pytorch/torch/_dynamo/symbolic_convert.py", line 586, in call_function
    self.push(fn.call_function(self, args, kwargs))
  File "/data/users/dberard/pytorch/torch/_dynamo/variables/torch.py", line 722, in call_function
    tensor_variable = wrap_fx_proxy(
  File "/data/users/dberard/pytorch/torch/_dynamo/variables/builder.py", line 1341, in wrap_fx_proxy
    return wrap_fx_proxy_cls(
  File "/data/users/dberard/pytorch/torch/_dynamo/variables/builder.py", line 1431, in wrap_fx_proxy_cls
    example_value = get_fake_value(proxy.node, tx, allow_non_graph_fake=True)
  File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1483, in get_fake_value
    raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
  File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1444, in get_fake_value
    ret_val = wrap_fake_exception(
  File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 993, in wrap_fake_exception
    return fn()
  File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1445, in <lambda>
    lambda: run_node(tx.output, node, args, kwargs, nnmodule)
  File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1548, in run_node
    raise RuntimeError(fn_str + str(e)).with_traceback(e.__traceback__) from e
  File "/data/users/dberard/pytorch/torch/_dynamo/utils.py", line 1527, in run_node
    return node.target(*args, **kwargs)
  File "/data/users/dberard/pytorch/torch/utils/_stats.py", line 20, in wrapper
    return fn(*args, **kwargs)
  File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1378, in __torch_dispatch__
    return self.dispatch(func, types, args, kwargs)
  File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1680, in dispatch
    return self.wrap_meta_outputs_with_default_device_logic(r, func, args, kwargs)
  File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1746, in wrap_meta_outputs_with_default_device_logic
    return tree_map(partial(wrap), r)
  File "/data/users/dberard/pytorch/torch/utils/_pytree.py", line 425, in tree_map
    return tree_unflatten([fn(i) for i in flat_args], spec)
  File "/data/users/dberard/pytorch/torch/utils/_pytree.py", line 425, in <listcomp>
    return tree_unflatten([fn(i) for i in flat_args], spec)
  File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1763, in wrap
    ) = FakeTensor._find_common_device(func, args, kwargs)
  File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1257, in _find_common_device
    pytree.tree_map_(merge_devices, kwargs)
  File "/data/users/dberard/pytorch/torch/utils/_pytree.py", line 430, in tree_map_
    deque(map(fn, flat_args), maxlen=0)  # consume and exhaust the iterable
  File "/data/users/dberard/pytorch/torch/_subclasses/fake_tensor.py", line 1252, in merge_devices
    raise RuntimeError(
torch._dynamo.exc.TorchRuntimeError: Failed running call_function <built-in method diag of type object at 0x7fd1cebeaf40>(*(), **{'input': FakeTensor(..., device='cuda:0', size=(s0, s1)), 'diagonal': 0, 'out': FakeTensor(..., size=(s0,))}):
Unhandled FakeTensor Device Propagation for aten.diagonal_copy.out, found two different devices cuda:0, cpu

from user code:
  File "/data/users/dberard/scripts/oncall/112502.py", line 7, in forward
    x = torch.diag(input=x, diagonal=0,out=torch.rand([9, 10], dtype=torch.float32).to('cpu'))

Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information

You can suppress this exception and fall back to eager by setting:
    import torch._dynamo
    torch._dynamo.config.suppress_errors = True
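
For reference, the gist does not include 112502.py itself. Below is a minimal sketch of what the repro likely looks like, reconstructed from the frames above: the forward body (line 7) and the torch.compile call (line 13) are quoted verbatim from the traceback, while the input shape, the imports, and the surrounding scaffolding are assumptions.

    import torch

    def forward(x, device):
        # Line 7 of the traceback: the out= tensor is freshly allocated on CPU
        # while x lives on CUDA, and its [9, 10] shape does not match diag's
        # [9]-shaped output, so eager mode warns about the deprecated resize.
        x = torch.diag(input=x, diagonal=0, out=torch.rand([9, 10], dtype=torch.float32).to('cpu'))
        return x

    # Input shape assumed from the [9, 10] mentioned in the resize warning.
    cuda_tensor = torch.rand([9, 10], dtype=torch.float32).to('cuda')
    op_info = torch.compile(forward, mode='max-autotune', fullgraph=False, dynamic=True)(cuda_tensor, 'cuda')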
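
The failure itself is the device mismatch: while Dynamo traces the call with FakeTensors, FakeTensor._find_common_device sees input= on cuda:0 and out= on cpu for aten.diagonal_copy.out, has no propagation rule for that combination, and aborts compilation. A hedged sketch of a workaround (not from the gist): allocate the out= tensor on the input's device with the shape diag actually produces, which also avoids the resize deprecation warning; alternatively, per the warning text, an existing out tensor can be reused after t.resize_(0).

    def forward_fixed(x, device):
        # The main diagonal of an [n, m] matrix has min(n, m) elements;
        # allocating out= on x's own device keeps FakeTensor device
        # propagation consistent under torch.compile.
        out = torch.empty(min(x.shape), dtype=x.dtype, device=x.device)
        return torch.diag(input=x, diagonal=0, out=out)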