Skip to content

Instantly share code, notes, and snippets.

@davidberard98
Last active April 23, 2024 23:53
Show Gist options
  • Save davidberard98/877a52f6ea57025cc122d64361e598da to your computer and use it in GitHub Desktop.
/home/dberard/local/pytorch/torch/backends/cudnn/__init__.py:106: UserWarning: PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild PyTorch making sure the library is visible to the build system.
warnings.warn(
/home/dberard/local/pytorch/torch/backends/cudnn/__init__.py:106: UserWarning: PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild PyTorch making sure the library is visible to the build system.
warnings.warn(
/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/site-packages/z3/z3core.py:5: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html
import pkg_resources
/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/site-packages/pkg_resources/__init__.py:2871: DeprecationWarning: Deprecated call to `pkg_resources.declare_namespace('ruamel')`.
Implementing implicit namespace packages (as specified in PEP 420) is preferred to `pkg_resources.declare_namespace`. See https://setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages
declare_namespace(pkg)
E
/home/dberard/local/pytorch/torch/backends/cudnn/__init__.py:106: UserWarning: PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild PyTorch making sure the library is visible to the build system.
warnings.warn(
E
======================================================================
ERROR: test_return_nt_constructed_in_graph_cpu (__main__.TestNestedTensorSubclassCPU)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/dberard/local/pytorch/torch/testing/_internal/common_utils.py", line 2744, in wrapper
method(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/testing/_internal/common_device_type.py", line 432, in instantiated_test
raise rte
File "/home/dberard/local/pytorch/torch/testing/_internal/common_device_type.py", line 419, in instantiated_test
result = test(self, **param_kwargs)
File "/home/dberard/local/pytorch/test/test_nestedtensor.py", line 4012, in test_return_nt_constructed_in_graph
res = fn_c(values, offsets)
File "/home/dberard/local/pytorch/torch/_dynamo/eval_frame.py", line 403, in _fn
return fn(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 977, in catch_errors
return callback(frame, cache_entry, hooks, frame_state, skip=1)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 411, in _convert_frame_assert
return _compile(
File "/home/dberard/local/pytorch/torch/_utils_internal.py", line 70, in wrapper_function
return function(*args, **kwargs)
File "/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 700, in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "/home/dberard/local/pytorch/torch/_dynamo/utils.py", line 268, in time_wrapper
r = func(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 568, in compile_inner
out_code = transform_code_object(code, transform)
File "/home/dberard/local/pytorch/torch/_dynamo/bytecode_transformation.py", line 1116, in transform_code_object
transformations(instructions, code_options)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 173, in _fn
return fn(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 515, in transform
tracer.run()
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 2237, in run
super().run()
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 875, in run
while self.step():
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 790, in step
self.dispatch_table[inst.opcode](self, inst)
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 2394, in RETURN_VALUE
self._return(inst)
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 2379, in _return
self.output.compile_subgraph(
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1057, in compile_subgraph
self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
File "/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1274, in compile_and_call_fx_graph
compiled_fn = self.call_user_compiler(gm)
File "/home/dberard/local/pytorch/torch/_dynamo/utils.py", line 268, in time_wrapper
r = func(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1365, in call_user_compiler
raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1346, in call_user_compiler
compiled_fn = compiler_fn(gm, self.example_inputs())
File "/home/dberard/local/pytorch/torch/_dynamo/repro/after_dynamo.py", line 127, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "/home/dberard/local/pytorch/torch/_dynamo/repro/after_dynamo.py", line 127, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "/home/dberard/local/pytorch/torch/__init__.py", line 1742, in __call__
return compile_fx(model_, inputs_, config_patches=self.config)
File "/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/dberard/local/pytorch/torch/_inductor/compile_fx.py", line 1416, in compile_fx
return aot_autograd(
File "/home/dberard/local/pytorch/torch/_dynamo/backends/common.py", line 65, in compiler_fn
cg = aot_module_simplified(gm, example_inputs, **kwargs)
File "/home/dberard/local/pytorch/torch/_functorch/aot_autograd.py", line 958, in aot_module_simplified
compiled_fn = create_aot_dispatcher_function(
File "/home/dberard/local/pytorch/torch/_dynamo/utils.py", line 268, in time_wrapper
r = func(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_functorch/aot_autograd.py", line 558, in create_aot_dispatcher_function
fw_metadata = run_functionalized_fw_and_collect_metadata(
File "/home/dberard/local/pytorch/torch/_functorch/_aot_autograd/collect_metadata_analysis.py", line 547, in inner
dynamic_dims = {
File "/home/dberard/local/pytorch/torch/_functorch/_aot_autograd/collect_metadata_analysis.py", line 548, in <setcomp>
i for i, s in enumerate(o.shape) if not is_concrete_int(s)
File "/home/dberard/local/pytorch/torch/fx/experimental/symbolic_shapes.py", line 217, in is_concrete_int
if isinstance(a.node.expr, sympy.core.numbers.Integer):
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
AttributeError: 'torch._C._SymNode' object has no attribute 'expr'
Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
You can suppress this exception and fall back to eager by setting:
import torch._dynamo
torch._dynamo.config.suppress_errors = True
To execute this test, run the following from the base repo dir:
python test/test_nestedtensor.py -k test_return_nt_constructed_in_graph_cpu
This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0
======================================================================
ERROR: test_return_nt_constructed_in_graph_cuda (__main__.TestNestedTensorSubclassCUDA)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/dberard/local/pytorch/torch/testing/_internal/common_utils.py", line 2744, in wrapper
method(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/testing/_internal/common_utils.py", line 2744, in wrapper
method(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/testing/_internal/common_device_type.py", line 432, in instantiated_test
raise rte
File "/home/dberard/local/pytorch/torch/testing/_internal/common_device_type.py", line 419, in instantiated_test
result = test(self, **param_kwargs)
File "/home/dberard/local/pytorch/test/test_nestedtensor.py", line 4012, in test_return_nt_constructed_in_graph
res = fn_c(values, offsets)
File "/home/dberard/local/pytorch/torch/_dynamo/eval_frame.py", line 403, in _fn
return fn(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 977, in catch_errors
return callback(frame, cache_entry, hooks, frame_state, skip=1)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 411, in _convert_frame_assert
return _compile(
File "/home/dberard/local/pytorch/torch/_utils_internal.py", line 70, in wrapper_function
return function(*args, **kwargs)
File "/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 700, in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "/home/dberard/local/pytorch/torch/_dynamo/utils.py", line 268, in time_wrapper
r = func(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 568, in compile_inner
out_code = transform_code_object(code, transform)
File "/home/dberard/local/pytorch/torch/_dynamo/bytecode_transformation.py", line 1116, in transform_code_object
transformations(instructions, code_options)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 173, in _fn
return fn(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/convert_frame.py", line 515, in transform
tracer.run()
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 2237, in run
super().run()
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 875, in run
while self.step():
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 790, in step
self.dispatch_table[inst.opcode](self, inst)
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 2394, in RETURN_VALUE
self._return(inst)
File "/home/dberard/local/pytorch/torch/_dynamo/symbolic_convert.py", line 2379, in _return
self.output.compile_subgraph(
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1057, in compile_subgraph
self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
File "/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1274, in compile_and_call_fx_graph
compiled_fn = self.call_user_compiler(gm)
File "/home/dberard/local/pytorch/torch/_dynamo/utils.py", line 268, in time_wrapper
r = func(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1365, in call_user_compiler
raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
File "/home/dberard/local/pytorch/torch/_dynamo/output_graph.py", line 1346, in call_user_compiler
compiled_fn = compiler_fn(gm, self.example_inputs())
File "/home/dberard/local/pytorch/torch/_dynamo/repro/after_dynamo.py", line 127, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "/home/dberard/local/pytorch/torch/_dynamo/repro/after_dynamo.py", line 127, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "/home/dberard/local/pytorch/torch/__init__.py", line 1742, in __call__
return compile_fx(model_, inputs_, config_patches=self.config)
File "/home/dberard/local/miniconda3/envs/pytorch/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/dberard/local/pytorch/torch/_inductor/compile_fx.py", line 1416, in compile_fx
return aot_autograd(
File "/home/dberard/local/pytorch/torch/_dynamo/backends/common.py", line 65, in compiler_fn
cg = aot_module_simplified(gm, example_inputs, **kwargs)
File "/home/dberard/local/pytorch/torch/_functorch/aot_autograd.py", line 958, in aot_module_simplified
compiled_fn = create_aot_dispatcher_function(
File "/home/dberard/local/pytorch/torch/_dynamo/utils.py", line 268, in time_wrapper
r = func(*args, **kwargs)
File "/home/dberard/local/pytorch/torch/_functorch/aot_autograd.py", line 558, in create_aot_dispatcher_function
fw_metadata = run_functionalized_fw_and_collect_metadata(
File "/home/dberard/local/pytorch/torch/_functorch/_aot_autograd/collect_metadata_analysis.py", line 547, in inner
dynamic_dims = {
File "/home/dberard/local/pytorch/torch/_functorch/_aot_autograd/collect_metadata_analysis.py", line 548, in <setcomp>
i for i, s in enumerate(o.shape) if not is_concrete_int(s)
File "/home/dberard/local/pytorch/torch/fx/experimental/symbolic_shapes.py", line 217, in is_concrete_int
if isinstance(a.node.expr, sympy.core.numbers.Integer):
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
AttributeError: 'torch._C._SymNode' object has no attribute 'expr'
Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
You can suppress this exception and fall back to eager by setting:
import torch._dynamo
torch._dynamo.config.suppress_errors = True
To execute this test, run the following from the base repo dir:
python test/test_nestedtensor.py -k test_return_nt_constructed_in_graph_cuda
This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0
----------------------------------------------------------------------
Ran 2 tests in 3.490s
FAILED (errors=2)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment