Created March 30, 2024 23:09
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl | |
return forward_call(*args, **kwargs) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/workspace/data/huggingface-cache/hub/modules/transformers_modules/LnL-AI/dbrx-base-converted-v2/6fd917c6db71ebb6a25612aa8eb4d453c560e622/modeling_dbrx.py", line 664, in forward | |
hidden_states, attn_weights, past_key_value = self.attn( | |
^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl return self._call_impl(*args, **kwargs) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl | |
return forward_call(*args, **kwargs) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/workspace/data/huggingface-cache/hub/modules/transformers_modules/LnL-AI/dbrx-base-converted-v2/6fd917c6db71ebb6a25612aa8eb4d453c560e622/modeling_dbrx.py", line 322, in forward | |
query_states = self.q_proj(hidden_states) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl | |
return self._call_impl(*args, **kwargs) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl | |
return forward_call(*args, **kwargs) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/peft/tuners/lora/bnb.py", line 474, in forward | |
output = lora_B(lora_A(dropout(x))) * scaling | |
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl | |
return self._call_impl(*args, **kwargs) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl | |
return forward_call(*args, **kwargs) | |
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py", line 835, in forward | |
args, kwargs = _pre_forward( | |
^^^^^^^^^^^^^ | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/distributed/fsdp/_runtime_utils.py", line 380, in _pre_forward | |
unshard_fn(state, handle) | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/distributed/fsdp/_runtime_utils.py", line 415, in _pre_forward_unshard | |
_unshard(state, handle, state._unshard_stream, state._pre_unshard_stream) | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/distributed/fsdp/_runtime_utils.py", line 297, in _unshard | |
event.synchronize() | |
File "/root/miniconda3/envs/py3.11/lib/python3.11/site-packages/torch/cuda/streams.py", line 224, in synchronize | |
super().synchronize() | |
RuntimeError: CUDA error: an illegal memory access was encountered |
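
The stack shows the general shape of the run: a quantized LoRA layer (peft/tuners/lora/bnb.py) inside the custom DBRX attention module, wrapped in FSDP, with the illegal memory access surfacing when FSDP's pre-forward unshard waits on a CUDA event. The gist does not include the training script, so the following is only a minimal reconstruction sketch of that kind of setup (QLoRA on the LnL-AI/dbrx-base-converted-v2 checkpoint under FSDP); the LoRA hyperparameters, target-module list, and launch details are assumptions, not taken from the gist.

# Hypothetical sketch, not the author's script: QLoRA + FSDP on the DBRX
# checkpoint named in the traceback. Launch with torchrun --nproc_per_node=N.
import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

dist.init_process_group("nccl")
local_rank = dist.get_rank() % torch.cuda.device_count()
torch.cuda.set_device(local_rank)

# 4-bit quantization via bitsandbytes; the LoRA-on-bnb forward in the
# traceback (peft/tuners/lora/bnb.py) is only hit when base weights are quantized.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "LnL-AI/dbrx-base-converted-v2",  # checkpoint path from the traceback
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,           # modeling_dbrx.py is remote custom code
)

# Attach LoRA adapters; q_proj appears in the failing frame, the remaining
# hyperparameters are illustrative assumptions.
lora_config = LoraConfig(r=16, lora_alpha=32, target_modules=["q_proj"], task_type="CAUSAL_LM")
model = get_peft_model(model, lora_config)

# FSDP wrapping; the crash is reported from FSDP's pre-forward unshard
# (_runtime_utils._unshard -> event.synchronize()).
model = FSDP(model, device_id=local_rank, use_orig_params=True)

Because CUDA errors are reported asynchronously, the frame that raises (the event synchronize inside _unshard) is not necessarily the kernel that faulted; rerunning with CUDA_LAUNCH_BLOCKING=1 usually pins the error to the actual launch site.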