diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 21c5e247..7e812cbc 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -515,7 +515,7 @@ steps:
   - vllm/worker/model_runner.py
   - entrypoints/llm/test_collective_rpc.py
   commands:
-  - pytest -v -s entrypoints/llm/test_collective_rpc.py
+  - VLLM_ENABLE_V1_MULTIPROCESSING=0 pytest -v -s entrypoints/llm/test_collective_rpc.py
   - pytest -v -s ./compile/test_basic_correctness.py
   - pytest -v -s ./compile/test_wrapper.py
   - VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
diff --git a/tests/entrypoints/llm/test_collective_rpc.py b/tests/entrypoints/llm/test_collective_rpc.py
index 64c473c4..d51b7c26 100644
--- a/tests/entrypoints/llm/test_collective_rpc.py
+++ b/tests/entrypoints/llm/test_collective_rpc.py
@@ -21,18 +21,9 @@ def test_collective_rpc(tp_size, backend):
     def echo_rank(self):
         return self.rank
 
-    from vllm.worker.worker import Worker
-
-    class MyWorker(Worker):
-
-        def echo_rank(self):
-            return self.rank
-
     llm = LLM(model="meta-llama/Llama-3.2-1B-Instruct",
               enforce_eager=True,
               load_format="dummy",
               tensor_parallel_size=tp_size,
-              distributed_executor_backend=backend,
-              worker_cls=MyWorker)
-    for method in ["echo_rank", echo_rank]:
-        assert llm.collective_rpc(method) == list(range(tp_size))
+              distributed_executor_backend=backend)
+    assert llm.collective_rpc(echo_rank) == list(range(tp_size))