# SPDX-License-Identifier: Apache-2.0
"""Compare the outputs of HF and vLLM when using greedy sampling.

Run `pytest tests/models/test_models.py`.
"""

import pytest
import torch

from vllm.platforms import current_platform

from ...utils import check_logprobs_close

# These models have a head_dim that is unsupported by FlashAttention. We do
# not have a clean way to fall back, so we fail with a clear message when
# that happens.
# https://github.com/vllm-project/vllm/issues/14524
REQUIRES_V0 = ["microsoft/phi-2", "stabilityai/stablelm-3b-4e1t"]

# This list contains the models that currently use AITER kernels.
# Tests for models not in this list are skipped when AITER is enabled.
# Once more AITER kernels are added, this list will no longer be needed,
# since all models will call AITER kernels in parts of their operators.
AITER_MODEL_LIST = [
    "meta-llama/Llama-3.2-1B-Instruct",
    "openbmb/MiniCPM3-4B",
    "Qwen/Qwen-7B",
    "Qwen/Qwen2.5-0.5B-Instruct",
    "ehristoforu/Falcon3-MoE-2x7B-Insruct",
]
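
# AITER kernels are toggled per test case below via the VLLM_ROCM_USE_AITER
# environment variable and are only exercised on ROCm.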


# @maybe_test_rocm_aiter
@pytest.mark.parametrize(
    "model",
    [
        pytest.param(
            "bigscience/bloom-560m",  # bloom - testing alibi slopes
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        pytest.param(
            "openai-community/gpt2",  # gpt2
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        pytest.param("Milos/slovak-gpt-j-405M"),  # gptj
        pytest.param("bigcode/tiny_starcoder_py"),  # gpt_bigcode
        pytest.param("EleutherAI/pythia-70m"),  # gpt_neox
        pytest.param(
            "google/gemma-1.1-2b-it",  # gemma
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        pytest.param(
            "THUDM/chatglm3-6b",  # chatglm (text-only)
        ),
        pytest.param(
            "meta-llama/Llama-3.2-1B-Instruct",  # llama
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        pytest.param(
            "openbmb/MiniCPM3-4B",
            # fused_moe not supported on CPU
            marks=[pytest.mark.core_model],
        ),
        pytest.param(
            "facebook/opt-125m",  # opt
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        pytest.param(
            "microsoft/phi-2",  # phi
            marks=[pytest.mark.core_model],
        ),
        pytest.param(
            "Qwen/Qwen-7B",  # qwen (text-only)
        ),
        pytest.param(
            "Qwen/Qwen2.5-0.5B-Instruct",  # qwen2
            marks=[pytest.mark.core_model],
        ),
        pytest.param("stabilityai/stablelm-3b-4e1t"),  # stablelm
        pytest.param("bigcode/starcoder2-3b"),  # starcoder2
        pytest.param(
            "ehristoforu/Falcon3-MoE-2x7B-Insruct",  # mixtral
            marks=[pytest.mark.cpu_model],
        )
    ])
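# Each model above is crossed with the dtype, max_tokens and num_logprobs
# values below; on ROCm the sweep additionally runs once with and once
# without the AITER kernels.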
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("num_logprobs", [5])
@pytest.mark.parametrize(
    "use_rocm_aiter", [True, False] if current_platform.is_rocm() else [False])
def test_models(hf_runner, vllm_runner, example_prompts, model: str,
                dtype: str, max_tokens: int, num_logprobs: int,
                use_rocm_aiter: bool, monkeypatch) -> None:
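    # Fall back to the V0 engine for models whose head_dim FlashAttention
    # does not support (see REQUIRES_V0 above).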
    if model in REQUIRES_V0:
        monkeypatch.setenv("VLLM_USE_V1", "0")

    if use_rocm_aiter and (model in AITER_MODEL_LIST):
        monkeypatch.setenv("VLLM_ROCM_USE_AITER", "1")
    elif use_rocm_aiter and model not in AITER_MODEL_LIST:
        # Skip models that do not use AITER kernels. Once more AITER kernels
        # are added, this branch will no longer be needed, since all models
        # will call AITER kernels in parts of their operators.
        pytest.skip(f"Skipping '{model}' model test with AITER kernel.")
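
    # Generate the reference outputs with the HF implementation first,
    # collecting greedy completions and their top-`num_logprobs` logprobs.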
    with hf_runner(model, dtype=dtype) as hf_model:
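        # ChatGLM3's HF implementation does not provide
        # get_output_embeddings(), which is needed here to compute logprobs,
        # so point it at the transformer's output layer instead.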
        if model.startswith("THUDM/chatglm3"):
            hf_model.model.get_output_embeddings = lambda: \
                hf_model.model.transformer.output_layer

        hf_outputs = hf_model.generate_greedy_logprobs_limit(
            example_prompts, max_tokens, num_logprobs)
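
    # Generate with vLLM for the same prompts and greedy sampling settings.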
    with vllm_runner(model, dtype=dtype) as vllm_model:
        vllm_outputs = vllm_model.generate_greedy_logprobs(
            example_prompts, max_tokens, num_logprobs)
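
    # Greedy choices can diverge when top candidates are nearly tied, so
    # compare the top-`num_logprobs` logprobs of both runs instead of
    # requiring token-for-token identical outputs.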
    check_logprobs_close(
        outputs_0_lst=hf_outputs,
        outputs_1_lst=vllm_outputs,
        name_0="hf",
        name_1="vllm",
    )

    if use_rocm_aiter:
        # Ensure that the vLLM engine has deallocated its memory before the
        # next unit test runs. On ROCm with AITER enabled, memory may not be
        # fully deallocated before the next test case starts.
        torch.cuda.synchronize()