"""Tests for phi3v's multimodal preprocessing kwargs."""
import pytest
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.utils import cached_get_tokenizer
from .....conftest import _ImageAssets
from ....utils import build_model_context


@pytest.mark.parametrize("model_id", ["microsoft/Phi-3.5-vision-instruct"])
# yapf: disable
@pytest.mark.parametrize(
    ("mm_processor_kwargs", "expected_toks_per_img"),
    [
        ({"num_crops": 4}, 757),
        ({"num_crops": 16}, 1921),
        # the default num_crops of phi-3.5-vision is 4
        ({}, 757),
    ])
# yapf: enable
@pytest.mark.parametrize("num_imgs", [1, 2])
def test_processor_override(
    image_assets: _ImageAssets,
    model_id: str,
    mm_processor_kwargs: dict[str, int],
    expected_toks_per_img: int,
    num_imgs: int,
):
    """Ensure input_processor_for_phi3v handles num_crops properly."""
    # Imported lazily inside the test body so that merely collecting this
    # test does not initialize CUDA.
    from vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID

    model_ctx = build_model_context(
        model_name=model_id,
        tokenizer_name=model_id,
        trust_remote_code=True,
        limit_mm_per_prompt={"image": num_imgs},
    )
    hf_tokenizer = cached_get_tokenizer(model_ctx.model_config.tokenizer)
    mm_processor = MULTIMODAL_REGISTRY.create_processor(
        model_ctx.model_config,
        tokenizer=hf_tokenizer,
    )

    # Phi-3.5-vision numbers its image placeholders starting from 1.
    placeholders = "".join(f"<|image_{idx}|>\n"
                           for idx in range(1, num_imgs + 1))
    prompt = f"<|user|>\n{placeholders}<|end|>\n<|assistant|>\n"
    mm_data = {"image": [image_assets[0].pil_image] * num_imgs}

    processed = mm_processor.apply(prompt, mm_data, mm_processor_kwargs)

    # Every image should expand to exactly `expected_toks_per_img` image
    # tokens for the given num_crops setting.
    num_image_tokens = processed["prompt_token_ids"].count(_IMAGE_TOKEN_ID)
    assert num_image_tokens == expected_toks_per_img * num_imgs
|