[ROCm][Bugfix] Fixed several bugs related to rccl path and attention selector logic (#3699)

Hongxia Yang 2024-03-29 17:52:36 -04:00 committed by GitHub
parent 430530fc18
commit 9765b5c406
4 changed files with 5 additions and 5 deletions


@@ -90,6 +90,6 @@ RUN cd /app \
     && cd ..
 RUN python3 -m pip install --upgrade pip
-RUN python3 -m pip install --no-cache-dir ray[all]
+RUN python3 -m pip install --no-cache-dir ray[all]==2.9.3
 CMD ["/bin/bash"]


@@ -5,7 +5,7 @@ starlette
 requests
 py-cpuinfo
 psutil
-ray >= 2.9
+ray == 2.9.3
 sentencepiece # Required for LLaMA tokenizer.
 numpy
 tokenizers>=0.15.0
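
Both hunks above pin Ray to an exact release: the ROCm Dockerfile now installs `ray[all]==2.9.3` and the requirements file tightens `ray >= 2.9` to `ray == 2.9.3`, so the image and the requirements can no longer drift apart. A quick sanity check like the following (a hypothetical snippet, not part of the commit) confirms inside the built image that the pin resolved as intended:

```python
# Hypothetical post-build check, not part of this commit:
# verify the Ray pin took effect inside the built ROCm image.
import ray

assert ray.__version__ == "2.9.3", f"unexpected ray version: {ray.__version__}"
print("ray pin OK:", ray.__version__)
```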


@@ -405,8 +405,8 @@ def _check_use_naive_attention() -> bool:
     if not is_hip():
         return False
     # For ROCm, check whether flash attention is installed or not.
-    has_flash_attn = importlib.util.find_spec("flash_attn") is None
-    if not has_flash_attn:
+    use_naive_attention = importlib.util.find_spec("flash_attn") is None
+    if use_naive_attention:
         logger.warning("flash_attn is not installed. Using naive attention. "
                        "This will take significantly more GPU memory.")
         return True
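
The rename fixes an inverted check: `importlib.util.find_spec("flash_attn") is None` is True precisely when flash_attn is missing, so binding that result to `has_flash_attn` and branching on `if not has_flash_attn` warned and fell back to naive attention exactly when flash attention *was* installed. A minimal sketch of the corrected logic as a standalone function (the `is_hip()` guard and the final `return False` fall outside the hunk and are assumed here):

```python
import importlib.util
import logging

logger = logging.getLogger(__name__)

def _check_use_naive_attention() -> bool:
    # find_spec() returns None when the module cannot be imported,
    # so this flag is True exactly when flash_attn is absent.
    use_naive_attention = importlib.util.find_spec("flash_attn") is None
    if use_naive_attention:
        logger.warning("flash_attn is not installed. Using naive attention. "
                       "This will take significantly more GPU memory.")
        return True
    return False  # assumed tail: flash attention is available, no fallback
```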


@@ -41,7 +41,7 @@ else:
     if torch.version.cuda is not None:
         so_file = "libnccl.so.2"
     elif torch.version.hip is not None:
-        so_file = "librccl.so.2"
+        so_file = "librccl.so.1"
     else:
         raise ValueError("NCCL only supports CUDA and ROCm backends.")
     logger.debug(f"Loading nccl from library {so_file}")