diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh
index 532d6ad8..f4fa24be 100644
--- a/.buildkite/run-cpu-test.sh
+++ b/.buildkite/run-cpu-test.sh
@@ -23,4 +23,4 @@ docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py"
 docker exec cpu-test bash -c "cd tests;
   pip install pytest Pillow protobuf
   cd ../
-  pytest -v -s tests/models -m \"not llava\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py"
+  pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py"
diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 6439a315..c1e433ec 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -100,13 +100,13 @@ steps:
 - label: Models Test
   #mirror_hardwares: [amd]
   commands:
-  - pytest -v -s models -m \"not llava\"
+  - pytest -v -s models -m \"not vlm\"
 
-- label: Llava Test
+- label: Vision Language Models Test
   mirror_hardwares: [amd]
   commands:
   - bash ../.buildkite/download-images.sh
-  - pytest -v -s models -m llava
+  - pytest -v -s models -m vlm
 
 - label: Prefix Caching Test
   mirror_hardwares: [amd]
diff --git a/pyproject.toml b/pyproject.toml
index eb691c29..4958aae0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -71,5 +71,5 @@ markers = [
     "skip_global_cleanup",
     "llm: run tests for vLLM API only",
     "openai: run tests for OpenAI API only",
-    "llava: run tests for LLaVA models only",
+    "vlm: run tests for vision language models only",
 ]
diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py
index a1f0cff1..b41c69f7 100644
--- a/tests/models/test_llava.py
+++ b/tests/models/test_llava.py
@@ -7,7 +7,7 @@ from vllm.config import VisionLanguageConfig
 
 from ..conftest import IMAGE_FILES
 
-pytestmark = pytest.mark.llava
+pytestmark = pytest.mark.vlm
 
 # The image token is placed before "user" on purpose so that the test can pass
 HF_IMAGE_PROMPTS = [
diff --git a/tests/models/test_llava_next.py b/tests/models/test_llava_next.py
index aa6ee268..0eca5cb5 100644
--- a/tests/models/test_llava_next.py
+++ b/tests/models/test_llava_next.py
@@ -7,7 +7,7 @@ from vllm.config import VisionLanguageConfig
 
 from ..conftest import IMAGE_FILES
 
-pytestmark = pytest.mark.llava
+pytestmark = pytest.mark.vlm
 
 _PREFACE = (
     "A chat between a curious human and an artificial intelligence assistant. "
diff --git a/tests/models/test_phi3v.py b/tests/models/test_phi3v.py
index 607ad95e..1732e8f0 100644
--- a/tests/models/test_phi3v.py
+++ b/tests/models/test_phi3v.py
@@ -8,7 +8,7 @@ from vllm.utils import is_cpu
 
 from ..conftest import IMAGE_FILES
 
-pytestmark = pytest.mark.llava
+pytestmark = pytest.mark.vlm
 
 # The image token is placed before "user" on purpose so that the test can pass
 HF_IMAGE_PROMPTS = [