# In this file, you can add more tests to run, either by adding a new step or
# by adding a new command to an existing step. See the different options here
# for examples; a hypothetical example step is sketched below.
#
# This script is fed into the Jinja template `test-template-aws.j2` at
# https://github.com/vllm-project/buildkite-ci/blob/main/scripts/test-template-aws.j2
# to generate the final pipeline YAML file.
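#
# As a minimal sketch (the step label and test file below are hypothetical,
# shown only to illustrate keys that already appear in this file):
#
# - label: My New Test                    # name shown in the Buildkite UI
#   working_dir: "/vllm-workspace/tests"  # optional
#   num_gpus: 2                           # optional, for multi-GPU tests
#   mirror_hardwares: [amd]               # optional, also run this step on AMD
#   commands:
#   - pytest -v -s my_new_test.py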
steps:

- label: Regression Test
  mirror_hardwares: [amd]
  command: pytest -v -s test_regression.py
  working_dir: "/vllm-workspace/tests" # optional

- label: AsyncEngine Test
  #mirror_hardwares: [amd]
  command: pytest -v -s async_engine

- label: Basic Correctness Test
  mirror_hardwares: [amd]
  commands:
  - VLLM_ATTENTION_BACKEND=XFORMERS pytest -v -s basic_correctness/test_basic_correctness.py
  - VLLM_ATTENTION_BACKEND=FLASH_ATTN pytest -v -s basic_correctness/test_basic_correctness.py
  - VLLM_ATTENTION_BACKEND=XFORMERS pytest -v -s basic_correctness/test_chunked_prefill.py
  - VLLM_ATTENTION_BACKEND=FLASH_ATTN pytest -v -s basic_correctness/test_chunked_prefill.py
  - VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest -v -s basic_correctness/test_preemption.py

- label: Core Test
  mirror_hardwares: [amd]
  commands:
  - pytest -v -s core
  - pytest -v -s distributed/test_parallel_state.py

- label: Distributed Comm Ops Test
  #mirror_hardwares: [amd]
  working_dir: "/vllm-workspace/tests"
  num_gpus: 2
  commands:
  - pytest -v -s distributed/test_comm_ops.py
  - pytest -v -s distributed/test_shm_broadcast.py

- label: Distributed Tests (2 GPUs)
  mirror_hardwares: [amd]
  working_dir: "/vllm-workspace/tests"
  num_gpus: 2
  commands:
  - bash ../.buildkite/download-images.sh
  - VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py
  # Each TEST_DIST_MODEL / DISTRIBUTED_EXECUTOR_BACKEND pair below reruns the
  # same test file with a different model and executor backend.
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
  - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_chunked_prefill_distributed.py
  - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_chunked_prefill_distributed.py
  - TEST_DIST_MODEL=llava-hf/llava-1.5-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_multimodal_broadcast.py
  - TEST_DIST_MODEL=microsoft/Phi-3-vision-128k-instruct DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_multimodal_broadcast.py
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
  - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_chunked_prefill_distributed.py
  - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_chunked_prefill_distributed.py
  - TEST_DIST_MODEL=llava-hf/llava-1.5-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_multimodal_broadcast.py
  - TEST_DIST_MODEL=microsoft/Phi-3-vision-128k-instruct DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_multimodal_broadcast.py
  - pytest -v -s spec_decode/e2e/test_integration_dist_tp2.py
  - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s test_sharded_state_loader.py
  - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s distributed/test_utils.py

- label: Distributed Tests (4 GPUs)
  #mirror_hardwares: [amd]
  working_dir: "/vllm-workspace/tests"
  num_gpus: 4
  commands:
  - pytest -v -s distributed/test_pynccl.py
  # We want to test that models which use 2 GPUs work with 4 GPUs, which is why we duplicate them here.
  # See https://github.com/vllm-project/vllm/pull/5473#issuecomment-2166601837 for context.
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
  - pytest -v -s spec_decode/e2e/test_integration_dist_tp4.py

- label: Engine Test
  mirror_hardwares: [amd]
  command: pytest -v -s engine tokenization test_sequence.py test_config.py test_logger.py

- label: Entrypoints Test
  mirror_hardwares: [amd]
  commands:
  - pytest -v -s entrypoints/llm
  - pytest -v -s entrypoints/openai

- label: Examples Test
  working_dir: "/vllm-workspace/examples"
  mirror_hardwares: [amd]
  commands:
  # Install the AWS CLI for llava_example.py
  # and tensorizer for tensorize_vllm_model.py
  - pip install awscli tensorizer
  - python3 offline_inference.py
  - python3 offline_inference_with_prefix.py
  - python3 llm_engine_example.py
  - python3 llava_example.py
  - python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors

- label: Inputs Test
  #mirror_hardwares: [amd]
  commands:
  - bash ../.buildkite/download-images.sh
  - pytest -v -s test_inputs.py
  - pytest -v -s multimodal

- label: Kernels Test %N
  #mirror_hardwares: [amd]
  command: pytest -v -s kernels --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
  parallelism: 4
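
# With `parallelism: 4`, Buildkite runs four parallel jobs for the step above,
# setting BUILDKITE_PARALLEL_JOB (0-3) and BUILDKITE_PARALLEL_JOB_COUNT (4) in
# each; the `$$` defers variable expansion to runtime. A sketch of what shard 0
# effectively runs:
#
#   pytest -v -s kernels --shard-id=0 --num-shards=4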

- label: Models Test
  #mirror_hardwares: [amd]
  commands:
  - pytest -v -s models -m \"not vlm\"

- label: Vision Language Models Test
  mirror_hardwares: [amd]
  commands:
  - bash ../.buildkite/download-images.sh
  - pytest -v -s models -m vlm

- label: Prefix Caching Test
  mirror_hardwares: [amd]
  commands:
  - pytest -v -s prefix_caching

- label: Samplers Test
  #mirror_hardwares: [amd]
  command: pytest -v -s samplers

- label: LogitsProcessor Test
  mirror_hardwares: [amd]
  command: pytest -v -s test_logits_processor.py

- label: Utils Test
  command: pytest -v -s test_utils.py

- label: Worker Test
  mirror_hardwares: [amd]
  command: pytest -v -s worker

- label: Speculative decoding tests
  #mirror_hardwares: [amd]
  commands:
  # Pin the attention backend to XFORMERS; see https://github.com/vllm-project/vllm/issues/5152
  - export VLLM_ATTENTION_BACKEND=XFORMERS
  - pytest -v -s spec_decode

- label: LoRA Test %N
  #mirror_hardwares: [amd]
  command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py
  parallelism: 4

- label: LoRA Long Context (Distributed)
  #mirror_hardwares: [amd]
  num_gpus: 4
  # This test runs Llama 13B, so it requires 4 GPUs.
  commands:
  # FIXIT: find out which code initializes CUDA before running the test;
  # until that is fixed, we need to use spawn as the multiprocessing method.
  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
  - pytest -v -s -x lora/test_long_context.py

- label: Tensorizer Test
  #mirror_hardwares: [amd]
  # -y keeps apt-get non-interactive in CI
  command: apt-get install -y curl libsodium23 && pytest -v -s tensorizer_loader

- label: Metrics Test
  mirror_hardwares: [amd]
  command: pytest -v -s metrics

- label: Quantization Test
  #mirror_hardwares: [amd]
  command: pytest -v -s quantization

- label: Tracing Test
  commands:
  - "pip install \
      opentelemetry-sdk \
      opentelemetry-api \
      opentelemetry-exporter-otlp \
      opentelemetry-semantic-conventions-ai"
  - pytest -v -s tracing

- label: Benchmarks
  working_dir: "/vllm-workspace/.buildkite"
  mirror_hardwares: [amd]
  commands:
  - pip install aiohttp
  - bash run-benchmarks.sh

- label: LM Eval Small Models
  working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
  commands:
  - pip install lm-eval
  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
  - bash ./run-tests.sh -c configs/models-small.txt -t 1

- label: LM Eval Large Models
  gpu: a100
  num_gpus: 4
  working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
  commands:
  - pip install lm-eval
  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
  - bash ./run-tests.sh -c configs/models-large.txt -t 4

- label: Documentation Build
  working_dir: "/vllm-workspace/test_docs/docs"
  no_gpu: True
  commands:
  - pip install -r requirements-docs.txt
  # -W turns Sphinx warnings into errors
  - SPHINXOPTS=\"-W\" make html

- label: Distributed Tests (A100)
  gpu: a100
  num_gpus: 4
  commands:
  # NOTE: don't test the llama model here; the HF implementation seems buggy.
  # See https://github.com/vllm-project/vllm/pull/5689 for details.
  - pytest -v -s distributed/test_custom_all_reduce.py
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
  - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
  - pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.0.5/flashinfer-0.0.5+cu121torch2.3-cp310-cp310-linux_x86_64.whl
  - VLLM_ATTENTION_BACKEND=FLASHINFER TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
  - VLLM_ATTENTION_BACKEND=FLASHINFER TEST_DIST_MODEL=meta-llama/Meta-Llama-3-8B DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py