[Misc] Restrict ray version dependency and update PP feature warning in V1 (#15556)

This commit is contained in:
Rui Qiao 2025-03-26 23:21:07 -07:00 committed by GitHub
parent 619d3de8bd
commit df8d3d1287
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 8 additions and 5 deletions

View File

@@ -4,7 +4,7 @@
numba == 0.60.0 # v0.61 doesn't support Python 3.9. Required for N-gram speculative decoding
# Dependencies for NVIDIA GPUs
ray[cgraph]>=2.43.0 # Ray Compiled Graph, required for pipeline parallelism in V1.
ray[cgraph]>=2.43.0, !=2.44.* # Ray Compiled Graph, required for pipeline parallelism in V1.
torch==2.6.0
torchaudio==2.6.0
# These must be updated alongside torch

View File

@@ -17,7 +17,7 @@ vector_quantize_pytorch # required for minicpmo_26 test
vocos # required for minicpmo_26 test
peft
pqdm
ray[cgraph]>=2.43.0 # Ray Compiled Graph, required by pipeline parallelism tests
ray[cgraph]>=2.43.0, !=2.44.* # Ray Compiled Graph, required by pipeline parallelism tests
sentence-transformers # required for embedding tests
soundfile # required for audio tests
jiwer # required for audio tests

View File

@@ -313,7 +313,7 @@ class ModelConfig:
raise ValueError(
"VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
"module was not found."
"See https://github.com/vllm-project/vllm/blob/main/Dockerfile"
"See https://github.com/vllm-project/vllm/blob/main/Dockerfile "
"for instructions on how to install it.")
# The tokenizer version is consistent with the model version by default.

View File

@@ -1686,8 +1686,11 @@ class EngineArgs:
if self.enable_lora and _warn_or_fallback("LORA"):
return False
# PP is supported on V1, but off by default for now.
if self.pipeline_parallel_size > 1 and _warn_or_fallback("PP"):
# PP is supported on V1 with Ray distributed executor,
# but off for MP distributed executor for now.
if (self.pipeline_parallel_size > 1
and self.distributed_executor_backend == "mp"
and _warn_or_fallback("PP (MP distributed executor)")):
return False
# ngram is supported on V1, but off by default for now.