Created
May 14, 2024 00:08
-
-
Save xmfan/7180f97a1ca97864112361e1208a69ed to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
(benchmarks) [17:02:01] ~/pytorch (main) > TORCHINDUCTOR_COMPILE_THREADS=1 pytest test/inductor/test_compiled_autograd.py -k 'test_torch_compile or test_access_saved_tensor_twice_without_recomputation_works'
==================================================================================================== test session starts =====================================================================================================
platform linux -- Python 3.10.14, pytest-8.1.1, pluggy-1.4.0
benchmark: 4.0.0 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
rootdir: /home/xmfan/pytorch
configfile: pytest.ini
plugins: hypothesis-6.100.1, benchmark-4.0.0, hydra-core-1.3.2, typeguard-4.2.1
collected 455 items / 453 deselected / 2 selected
Running 2 items in this shard
test/inductor/test_compiled_autograd.py .F [100%]
========================================================================================================== FAILURES ==========================================================================================================
________________________________________________________________ TestAutogradWithCompiledAutograd.test_access_saved_tensor_twice_without_recomputation_works _________________________________________________________________
Traceback (most recent call last):
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/unittest/case.py", line 59, in testPartExecutor
    yield
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/unittest/case.py", line 591, in run
    self._callTestMethod(testMethod)
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/unittest/case.py", line 549, in _callTestMethod
    method()
  File "/home/xmfan/pytorch/torch/testing/_internal/common_utils.py", line 2756, in wrapper
    method(*args, **kwargs)
  File "/home/xmfan/pytorch/test/inductor/test_compiled_autograd.py", line 1594, in wrapped
    out = fn(self)
  File "/home/xmfan/pytorch/test/test_autograd.py", line 6850, in test_access_saved_tensor_twice_without_recomputation_works
    d.sum().backward()
  File "/home/xmfan/pytorch/torch/_tensor.py", line 523, in backward
    torch.autograd.backward(
  File "/home/xmfan/pytorch/torch/autograd/__init__.py", line 267, in backward
    _engine_run_backward(
  File "/home/xmfan/pytorch/torch/autograd/graph.py", line 767, in _engine_run_backward
    return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
  File "/home/xmfan/pytorch/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_dynamo/eval_frame.py", line 420, in _fn
    return fn(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/fx/graph_module.py", line 736, in call_wrapped
    return self._wrapped_call(self, *args, **kwargs)
  File "/home/xmfan/pytorch/torch/fx/graph_module.py", line 315, in __call__
    raise e
  File "/home/xmfan/pytorch/torch/fx/graph_module.py", line 302, in __call__
    return super(self.cls, obj).__call__(*args, **kwargs)  # type: ignore[misc]
  File "/home/xmfan/pytorch/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_dynamo/convert_frame.py", line 986, in catch_errors
    return callback(frame, cache_entry, hooks, frame_state, skip=1)
  File "/home/xmfan/pytorch/torch/_dynamo/convert_frame.py", line 381, in _convert_frame_assert
    return _compile(
  File "/home/xmfan/pytorch/torch/_utils_internal.py", line 70, in wrapper_function
    return function(*args, **kwargs)
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/contextlib.py", line 79, in inner
    return func(*args, **kwds)
  File "/home/xmfan/pytorch/torch/_dynamo/convert_frame.py", line 708, in _compile
    guarded_code = compile_inner(code, one_graph, hooks, transform)
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 273, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_dynamo/convert_frame.py", line 543, in compile_inner
    out_code = transform_code_object(code, transform)
  File "/home/xmfan/pytorch/torch/_dynamo/bytecode_transformation.py", line 1167, in transform_code_object
    transformations(instructions, code_options)
  File "/home/xmfan/pytorch/torch/_dynamo/convert_frame.py", line 172, in _fn
    return fn(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_dynamo/convert_frame.py", line 490, in transform
    tracer.run()
  File "/home/xmfan/pytorch/torch/_dynamo/symbolic_convert.py", line 2234, in run
    super().run()
  File "/home/xmfan/pytorch/torch/_dynamo/symbolic_convert.py", line 884, in run
    while self.step():
  File "/home/xmfan/pytorch/torch/_dynamo/symbolic_convert.py", line 799, in step
    self.dispatch_table[inst.opcode](self, inst)
  File "/home/xmfan/pytorch/torch/_dynamo/symbolic_convert.py", line 2423, in RETURN_VALUE
    self._return(inst)
  File "/home/xmfan/pytorch/torch/_dynamo/symbolic_convert.py", line 2408, in _return
    self.output.compile_subgraph(
  File "/home/xmfan/pytorch/torch/_dynamo/output_graph.py", line 1108, in compile_subgraph
    self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/contextlib.py", line 79, in inner
    return func(*args, **kwds)
  File "/home/xmfan/pytorch/torch/_dynamo/output_graph.py", line 1300, in compile_and_call_fx_graph
    compiled_fn = self.call_user_compiler(gm)
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 273, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_dynamo/output_graph.py", line 1391, in call_user_compiler
    raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
  File "/home/xmfan/pytorch/torch/_dynamo/output_graph.py", line 1372, in call_user_compiler
    compiled_fn = compiler_fn(gm, self.example_inputs())
  File "/home/xmfan/pytorch/torch/_dynamo/repro/after_dynamo.py", line 127, in debug_wrapper
    compiled_gm = compiler_fn(gm, example_inputs)
  File "/home/xmfan/pytorch/torch/_dynamo/repro/after_dynamo.py", line 127, in debug_wrapper
    compiled_gm = compiler_fn(gm, example_inputs)
  File "/home/xmfan/pytorch/torch/__init__.py", line 1786, in __call__
    return self.compiler_fn(model_, inputs_, **self.kwargs)
  File "/home/xmfan/pytorch/test/inductor/test_compiled_autograd.py", line 28, in _inner_compiler
    return inductor.compile(gm_, example_inputs_)
  File "/home/xmfan/pytorch/torch/_inductor/__init__.py", line 28, in compile
    return compile_fx(gm, example_inputs, config_patches=options)
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/contextlib.py", line 79, in inner
    return func(*args, **kwds)
  File "/home/xmfan/pytorch/torch/_inductor/compile_fx.py", line 1300, in compile_fx
    return flatten_graph_inputs(
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 2678, in flatten_graph_inputs
    compiled_fn = compile_gm(GmWrapper(gm, spec), inputs)
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/contextlib.py", line 79, in inner
    return func(*args, **kwds)
  File "/home/xmfan/pytorch/torch/_inductor/compile_fx.py", line 1478, in compile_fx
    return aot_autograd(
  File "/home/xmfan/pytorch/torch/_dynamo/backends/common.py", line 65, in compiler_fn
    cg = aot_module_simplified(gm, example_inputs, **kwargs)
  File "/home/xmfan/pytorch/torch/_functorch/aot_autograd.py", line 962, in aot_module_simplified
    compiled_fn = create_aot_dispatcher_function(
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 273, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_functorch/aot_autograd.py", line 683, in create_aot_dispatcher_function
    compiled_fn = compiler_fn(
  File "/home/xmfan/pytorch/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py", line 177, in aot_dispatch_base
    compiled_fw = compiler(fw_module, updated_flat_args)
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 273, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_inductor/compile_fx.py", line 1382, in fw_compiler_base
    return inner_compile(
  File "/home/xmfan/pytorch/torch/_dynamo/repro/after_aot.py", line 83, in debug_wrapper
    inner_compiled_fn = compiler_fn(gm, example_inputs)
  File "/home/xmfan/pytorch/torch/_inductor/debug.py", line 304, in inner
    return fn(*args, **kwargs)
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/contextlib.py", line 79, in inner
    return func(*args, **kwds)
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/contextlib.py", line 79, in inner
    return func(*args, **kwds)
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 273, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_inductor/compile_fx.py", line 507, in compile_fx_inner
    compiled_graph = fx_codegen_and_compile(
  File "/home/xmfan/.conda/envs/benchmarks/lib/python3.10/contextlib.py", line 79, in inner
    return func(*args, **kwds)
  File "/home/xmfan/pytorch/torch/_inductor/compile_fx.py", line 803, in fx_codegen_and_compile
    compiled_fn = graph.compile_to_fn()
  File "/home/xmfan/pytorch/torch/_inductor/graph.py", line 1736, in compile_to_fn
    return self.compile_to_module().call
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 273, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_inductor/graph.py", line 1680, in compile_to_module
    mod = PyCodeCache.load_by_key_path(
  File "/home/xmfan/pytorch/torch/_inductor/codecache.py", line 2574, in load_by_key_path
    mod = _reload_python_module(key, path)
  File "/home/xmfan/pytorch/torch/_inductor/runtime/compile_tasks.py", line 44, in _reload_python_module
    exec(code, mod.__dict__, mod.__dict__)
  File "/tmp/torchinductor_xmfan/yj/cyjgbaaz6nvbdqhclwp6rtfxn3xa2zedksfjqj6gfze6zpu5icy2.py", line 30, in <module>
    cpp_fused_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], '''
  File "/home/xmfan/pytorch/torch/_inductor/codecache.py", line 3103, in cpp_pybinding
    return CppPythonBindingsCodeCache.load_pybinding(argtypes, source_code)
  File "/home/xmfan/pytorch/torch/_inductor/codecache.py", line 2476, in load_pybinding
    return cls.load_pybinding_async(*args, **kwargs)()
  File "/home/xmfan/pytorch/torch/_inductor/codecache.py", line 2468, in future
    result = get_result()
  File "/home/xmfan/pytorch/torch/_inductor/codecache.py", line 2297, in load_fn
    worker_fn()
  File "/home/xmfan/pytorch/torch/_inductor/codecache.py", line 2321, in _worker_compile_cpp
    compile_file(input_path, output_path, shlex.split(cmd))
  File "/home/xmfan/pytorch/torch/_dynamo/utils.py", line 273, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/xmfan/pytorch/torch/_inductor/codecache.py", line 2192, in compile_file
    raise exc.CppCompileError(cmd, output) from e
torch._dynamo.exc.BackendCompilerFailed: backend='_inner_compiler' raised:
CppCompileError: C++ compile error
Command:
g++ /tmp/torchinductor_xmfan/lh/clhrbhlwqp27dkmaousc67s4s4jg437zv3ofv7ultjyuakogrjbk.cpp -shared -fPIC -Wall -std=c++17 -Wno-unused-variable -Wno-unknown-pragmas -D_GLIBCXX_USE_CXX11_ABI=1 -I/home/xmfan/pytorch/torch/include -I/home/xmfan/pytorch/torch/include/torch/csrc/api/include -I/home/xmfan/pytorch/torch/include/TH -I/home/xmfan/pytorch/torch/include/THC -I/home/xmfan/.conda/envs/benchmarks/include/python3.10 -L/home/xmfan/pytorch/torch/lib -L/home/xmfan/.conda/envs/benchmarks/lib -L/home/xmfan/pytorch/torch/lib -ltorch -ltorch_cpu -lgomp -ltorch_python -lc10 -mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma -DCPU_CAPABILITY_AVX512 -O3 -DNDEBUG -ffast-math -fno-finite-math-only -fno-unsafe-math-optimizations -ffp-contract=off -march=native -fopenmp -D C10_USING_CUSTOM_GENERATED_MACROS -o /tmp/torchinductor_xmfan/lh/clhrbhlwqp27dkmaousc67s4s4jg437zv3ofv7ultjyuakogrjbk.so
Output:
/tmp/torchinductor_xmfan/lh/clhrbhlwqp27dkmaousc67s4s4jg437zv3ofv7ultjyuakogrjbk.cpp:2:10: fatal error: /tmp/tmp7otdsa_b/rq/crq573iugmokkndxawm743sgoqnmhemtfiwhap5ducjuyma5rxco.h: No such file or directory
    2 | #include "/tmp/tmp7otdsa_b/rq/crq573iugmokkndxawm743sgoqnmhemtfiwhap5ducjuyma5rxco.h"
      |          ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
compilation terminated.
Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
You can suppress this exception and fall back to eager by setting:
    import torch._dynamo
    torch._dynamo.config.suppress_errors = True
To execute this test, run the following from the base repo dir:
    python test/test_autograd.py -k TestAutogradWithCompiledAutograd.test_access_saved_tensor_twice_without_recomputation_works
This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0
================================================================================================== short test summary info ===================================================================================================
FAILED [0.1139s] test/inductor/test_compiled_autograd.py::TestAutogradWithCompiledAutograd::test_access_saved_tensor_twice_without_recomputation_works - torch._dynamo.exc.BackendCompilerFailed: backend='_inner_compiler' raised:
======================================================================================== 1 failed, 1 passed, 453 deselected in 24.61s ========================================================================================
(benchmarks) [17:02:32] ~/pytorch (main) > |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment