[CI/Build][Misc] Update Pytest Marker for VLMs (#5623)

This commit is contained in:
Roger Wang 2024-06-18 06:10:04 -07:00 committed by GitHub
parent f0cc0e68e3
commit 4ad7b53e59
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 8 additions and 8 deletions

View File

@@ -23,4 +23,4 @@ docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py"
 docker exec cpu-test bash -c "cd tests;
 pip install pytest Pillow protobuf
 cd ../
-pytest -v -s tests/models -m \"not llava\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py"
+pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py"

View File

@@ -100,13 +100,13 @@ steps:
 - label: Models Test
   #mirror_hardwares: [amd]
   commands:
-  - pytest -v -s models -m \"not llava\"
+  - pytest -v -s models -m \"not vlm\"
-- label: Llava Test
+- label: Vision Language Models Test
   mirror_hardwares: [amd]
   commands:
   - bash ../.buildkite/download-images.sh
-  - pytest -v -s models -m llava
+  - pytest -v -s models -m vlm
 - label: Prefix Caching Test
   mirror_hardwares: [amd]

View File

@@ -71,5 +71,5 @@ markers = [
 "skip_global_cleanup",
 "llm: run tests for vLLM API only",
 "openai: run tests for OpenAI API only",
-"llava: run tests for LLaVA models only",
+"vlm: run tests for vision language models only",
 ]

View File

@@ -7,7 +7,7 @@ from vllm.config import VisionLanguageConfig
 from ..conftest import IMAGE_FILES
-pytestmark = pytest.mark.llava
+pytestmark = pytest.mark.vlm
 # The image token is placed before "user" on purpose so that the test can pass
 HF_IMAGE_PROMPTS = [

View File

@@ -7,7 +7,7 @@ from vllm.config import VisionLanguageConfig
 from ..conftest import IMAGE_FILES
-pytestmark = pytest.mark.llava
+pytestmark = pytest.mark.vlm
 _PREFACE = (
 "A chat between a curious human and an artificial intelligence assistant. "

View File

@@ -8,7 +8,7 @@ from vllm.utils import is_cpu
 from ..conftest import IMAGE_FILES
-pytestmark = pytest.mark.llava
+pytestmark = pytest.mark.vlm
 # The image token is placed before "user" on purpose so that the test can pass
 HF_IMAGE_PROMPTS = [