
# SPDX-License-Identifier: Apache-2.0

import os
import re
from typing import List, Optional, Tuple, Type

import pytest
from transformers import AutoTokenizer

from vllm.multimodal.image import rescale_image_size
from vllm.platforms import current_platform
from vllm.sequence import SampleLogprobs

from ....conftest import IMAGE_ASSETS, HfRunner, PromptImageInput, VllmRunner
from ...utils import check_logprobs_close

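# Prompts follow the Phi-3 chat template; the <|image_1|>/<|image_2|>
# placeholders mark where each image is inserted into the prompt.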
HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
    "stop_sign":
    "<|user|>\n<|image_1|>\nWhat's the content of the image?<|end|>\n<|assistant|>\n",  # noqa: E501
    "cherry_blossom":
    "<|user|>\n<|image_1|>\nWhat is the season?<|end|>\n<|assistant|>\n",
})
HF_MULTIIMAGE_IMAGE_PROMPT = "<|user|>\n<|image_1|>\n<|image_2|>\nDescribe these images.<|end|>\n<|assistant|>\n"  # noqa: E501

models = ["microsoft/Phi-3.5-vision-instruct"]


def vllm_to_hf_output(vllm_output: Tuple[List[int], str,
                                         Optional[SampleLogprobs]],
                      model: str):
    """Sanitize vllm output to be comparable with hf output."""
    _, output_str, out_logprobs = vllm_output

    output_str_without_image = re.sub(r"(<\|image_\d+\|>)+", "", output_str)
    assert output_str_without_image[0] == " "
    output_str_without_image = output_str_without_image[1:]

    hf_output_str = output_str_without_image + "<|end|><|endoftext|>"

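    # Re-encode the sanitized text; the tokenizer prepends a BOS token
    # (id 1), which is stripped so the ids match the generated continuation.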
    tokenizer = AutoTokenizer.from_pretrained(model)
    hf_output_ids = tokenizer.encode(output_str_without_image)
    assert hf_output_ids[0] == 1
    hf_output_ids = hf_output_ids[1:]

    return hf_output_ids, hf_output_str, out_logprobs


target_dtype = "half"

# ROCm Triton FA can run into shared memory issues with these models;
# use other backends in the meantime.
# FIXME (mattwong, gshtrasb, hongxiayan)
if current_platform.is_rocm():
    os.environ["VLLM_USE_TRITON_FLASH_ATTN"] = "0"


def run_test(
    hf_runner: Type[HfRunner],
    vllm_runner: Type[VllmRunner],
    inputs: List[Tuple[List[str], PromptImageInput]],
    model: str,
    *,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    mm_limit: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
    """Inference result should be the same between hf and vllm.

    All the image fixtures for the test are from IMAGE_ASSETS.
    For the huggingface runner, we provide the PIL images as input.
    For the vllm runner, we provide MultiModalDataDict objects
    and the corresponding MultiModalConfig as input.
    Note that the text input is also adjusted to abide by the vllm contract.
    The text output is sanitized so that it can be compared with hf.
    """
    # HACK - this is an attempted workaround for the following bug
    # https://github.com/huggingface/transformers/issues/34307
    from transformers import AutoImageProcessor  # noqa: F401
    from transformers import AutoProcessor  # noqa: F401

    # NOTE: take care of the order: run vLLM first, and then run HF.
    # vLLM needs a fresh new process without cuda initialization.
    # If we run HF first, cuda initialization will already have happened and
    # it will hurt the multiprocessing backend with the fork method (the
    # default method).
    # max_model_len should be greater than image_feature_size.
    with vllm_runner(model,
                     task="generate",
                     max_model_len=4096,
                     max_num_seqs=2,
                     dtype=dtype,
                     limit_mm_per_prompt={"image": mm_limit},
                     tensor_parallel_size=tensor_parallel_size,
                     distributed_executor_backend=distributed_executor_backend,
                     enforce_eager=True) as vllm_model:
        vllm_outputs_per_case = [
            vllm_model.generate_greedy_logprobs(prompts,
                                                max_tokens,
                                                num_logprobs=num_logprobs,
                                                images=images)
            for prompts, images in inputs
        ]

    # use eager mode for hf runner, since phi3_v didn't work with flash_attn
    hf_model_kwargs = {"_attn_implementation": "eager"}
    with hf_runner(model, dtype=dtype,
                   model_kwargs=hf_model_kwargs) as hf_model:
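        # Pass the tokenizer's EOS token id explicitly so HF generation
        # stops at the same point as vLLM's.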
        eos_token_id = hf_model.processor.tokenizer.eos_token_id
        hf_outputs_per_case = [
            hf_model.generate_greedy_logprobs_limit(prompts,
                                                    max_tokens,
                                                    num_logprobs=num_logprobs,
                                                    images=images,
                                                    eos_token_id=eos_token_id)
            for prompts, images in inputs
        ]

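    # check_logprobs_close tolerates numerical drift: a token mismatch still
    # passes if the other runner ranked that token within its top logprobs.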
    for hf_outputs, vllm_outputs in zip(hf_outputs_per_case,
                                        vllm_outputs_per_case):
        check_logprobs_close(
            outputs_0_lst=hf_outputs,
            outputs_1_lst=[
                vllm_to_hf_output(vllm_output, model)
                for vllm_output in vllm_outputs
            ],
            name_0="hf",
            name_1="vllm",
        )


# Since we use _attn_implementation="eager" for hf_runner, there are larger
# numerical differences. The basic `logprobs=5` fails to pass.
@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
        # Multi-scale
        [0.25, 0.5, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", [target_dtype])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [10])
def test_models(hf_runner, vllm_runner, image_assets, model, size_factors,
                dtype: str, max_tokens: int, num_logprobs: int) -> None:
    images = [asset.pil_image for asset in image_assets]

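    # One batch per image asset: the same prompt repeated for each size
    # factor, paired with that image rescaled by the corresponding factor.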
    inputs_per_image = [(
        [prompt for _ in size_factors],
        [rescale_image_size(image, factor) for factor in size_factors],
    ) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]

    run_test(
        hf_runner,
        vllm_runner,
        inputs_per_image,
        model,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        mm_limit=1,
        tensor_parallel_size=1,
    )


@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize("dtype", [target_dtype])
def test_regression_7840(hf_runner, vllm_runner, image_assets, model,
                         dtype) -> None:
    images = [asset.pil_image for asset in image_assets]

    inputs_regression_7840 = [
        ([prompt], [image]) for image, prompt in zip(images, HF_IMAGE_PROMPTS)
    ]

    # Regression test for #7840.
    run_test(
        hf_runner,
        vllm_runner,
        inputs_regression_7840,
        model,
        dtype=dtype,
        max_tokens=128,
        num_logprobs=10,
        mm_limit=1,
        tensor_parallel_size=1,
    )


@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
        # Multi-scale
        [0.25, 0.5, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", [target_dtype])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [10])
def test_multi_images_models(hf_runner, vllm_runner, image_assets, model,
                             size_factors, dtype: str, max_tokens: int,
                             num_logprobs: int) -> None:
    images = [asset.pil_image for asset in image_assets]

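    # A single batch: the two-image prompt repeated for each size factor,
    # with both images rescaled by the corresponding factor.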
    inputs_per_case = [
        ([HF_MULTIIMAGE_IMAGE_PROMPT for _ in size_factors],
         [[rescale_image_size(image, factor) for image in images]
          for factor in size_factors])
    ]

    run_test(
        hf_runner,
        vllm_runner,
        inputs_per_case,
        model,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        mm_limit=2,
        tensor_parallel_size=1,
    )