Don't build punica kernels by default (#2605)

Philipp Moritz 2024-01-26 15:19:19 -08:00 committed by GitHub
parent 3a0e1fc070
commit 390b495ff3
4 changed files with 11 additions and 4 deletions


@@ -13,6 +13,8 @@ $python_executable -m pip install -r requirements.txt
 # Limit the number of parallel jobs to avoid OOM
 export MAX_JOBS=1
+# Make sure punica is built for the release (for LoRA)
+export VLLM_INSTALL_PUNICA_KERNELS=1
 # Build
 $python_executable setup.py bdist_wheel --dist-dir=dist

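Note that the build reads VLLM_INSTALL_PUNICA_KERNELS as an integer string, so only values like "1" or "0" are meaningful ("true" would raise a ValueError). A minimal sketch of the parsing this release script relies on (the helper name is illustrative, not from the diff):

import os

def punica_requested() -> bool:
    # Mirrors the setup.py check: unset or "0" -> False, "1" -> True.
    # Non-integer values such as "true" would raise ValueError here.
    return bool(int(os.getenv("VLLM_INSTALL_PUNICA_KERNELS", "0")))

os.environ.pop("VLLM_INSTALL_PUNICA_KERNELS", None)
print(punica_requested())  # False: kernels are now skipped by default
os.environ["VLLM_INSTALL_PUNICA_KERNELS"] = "1"  # what this script exports
print(punica_requested())  # True: punica kernels will be built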

@@ -45,6 +45,8 @@ ENV MAX_JOBS=${max_jobs}
 # number of threads used by nvcc
 ARG nvcc_threads=8
 ENV NVCC_THREADS=$nvcc_threads
+# make sure punica kernels are built (for LoRA)
+ENV VLLM_INSTALL_PUNICA_KERNELS=1
 RUN python3 setup.py build_ext --inplace
 #################### EXTENSION Build IMAGE ####################

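A quick way to confirm that an image built from this Dockerfile actually contains the kernels is an import check. Treat the extension module name vllm._punica_C as an assumption here; it is not shown in this diff:

# Hypothetical smoke test; assumes the punica kernels compile into an
# extension module named vllm._punica_C (not shown in this diff).
try:
    import vllm._punica_C  # noqa: F401
except ImportError:
    print("punica kernels missing; rebuild with VLLM_INSTALL_PUNICA_KERNELS=1")
else:
    print("punica kernels present; LoRA can use them")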

@@ -265,7 +265,7 @@ if _is_cuda():
         with contextlib.suppress(ValueError):
             torch_cpp_ext.COMMON_NVCC_FLAGS.remove(flag)
-    install_punica = bool(int(os.getenv("VLLM_INSTALL_PUNICA_KERNELS", "1")))
+    install_punica = bool(int(os.getenv("VLLM_INSTALL_PUNICA_KERNELS", "0")))
     device_count = torch.cuda.device_count()
     for i in range(device_count):
         major, minor = torch.cuda.get_device_capability(i)

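The hunk only shows the flag being read and each GPU's compute capability being queried; how the two combine is outside the context lines. A hedged sketch of that gating, assuming the extension is built only when the flag is set and every visible GPU is at least compute capability 8.0:

import os
import torch

# Sketch of the logic around setup.py line 265; the capability cutoff and the
# final append are assumptions, the rest mirrors the hunk above.
install_punica = bool(int(os.getenv("VLLM_INSTALL_PUNICA_KERNELS", "0")))
for i in range(torch.cuda.device_count()):
    major, minor = torch.cuda.get_device_capability(i)
    if (major, minor) < (8, 0):
        install_punica = False  # one pre-Ampere GPU disables the punica build
        break
if install_punica:
    print("would add the punica CUDA extension to ext_modules")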

@@ -157,10 +157,13 @@ else:
             **kwargs  # pylint: disable=unused-argument
     ):
         if torch.cuda.get_device_capability() < (8, 0):
-            raise ImportError(
-                "LoRA kernels require compute capability>=8.0") from import_exc
+            raise ImportError("punica LoRA kernels require compute "
+                              "capability>=8.0") from import_exc
         else:
-            raise import_exc
+            raise ImportError(
+                "punica LoRA kernels could not be imported. If you built vLLM "
+                "from source, make sure VLLM_INSTALL_PUNICA_KERNELS=1 env var "
+                "was set.") from import_exc

     bgmv = _raise_exc
     add_lora = _raise_exc
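For context, this error path sits in an import fallback: the module tries to load the compiled punica kernels and, when that fails, binds bgmv and add_lora to a stub that raises a clearer error at call time, which is why the new message can point at the env var. A condensed sketch of that pattern, assuming a plain try/except around the kernel import (the actual control flow above the hunk is not shown; names other than bgmv, add_lora and import_exc are illustrative):

import torch

try:
    import vllm._punica_C as punica_kernels  # compiled only when opted in
    import_exc = None
except ImportError as exc:
    import_exc = exc  # keep a reference; the "as" name is cleared after except

if import_exc is None:
    # Wrappers that call into punica_kernels would be defined here.
    pass
else:

    def _raise_exc(*args, **kwargs):  # pylint: disable=unused-argument
        # Fail at call time, distinguishing a hardware limit from a
        # build-configuration problem (the two messages changed above).
        if torch.cuda.get_device_capability() < (8, 0):
            raise ImportError("punica LoRA kernels require compute "
                              "capability>=8.0") from import_exc
        raise ImportError(
            "punica LoRA kernels could not be imported. If you built vLLM "
            "from source, make sure VLLM_INSTALL_PUNICA_KERNELS=1 env var "
            "was set.") from import_exc

    bgmv = _raise_exc
    add_lora = _raise_exc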