# SPDX-License-Identifier: Apache-2.0
"""
Tests GGUF models against unquantized model generations.

Note: To pass the test, quantization higher than Q4 should be used.
"""

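# Note (descriptive, not from the original file): the GGUF checkpoints below
# are fetched from the Hugging Face Hub via hf_hub_download on first use, so
# these tests need network access (and Hub credentials may be required for
# gated repos such as the meta-llama models).
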
import os
from typing import NamedTuple

import pytest
from huggingface_hub import hf_hub_download
from pytest import MarkDecorator
from transformers import AutoTokenizer

from tests.quantization.utils import is_quant_method_supported

from ....conftest import VllmRunner
from ....utils import multi_gpu_test
from ...utils import check_logprobs_close

os.environ["TOKENIZERS_PARALLELISM"] = "true"

MAX_MODEL_LEN = 1024


class GGUFTestConfig(NamedTuple):
    original_model: str
    gguf_repo: str
    gguf_filename: str
    marks: list[MarkDecorator] = []

    @property
    def gguf_model(self):
        return hf_hub_download(self.gguf_repo, filename=self.gguf_filename)

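
# Each config below pairs an original (unquantized) Hugging Face model with
# the GGUF repo/file it is compared against.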
LLAMA_CONFIG = GGUFTestConfig(
    original_model="meta-llama/Llama-3.2-1B-Instruct",
    gguf_repo="bartowski/Llama-3.2-1B-Instruct-GGUF",
    gguf_filename="Llama-3.2-1B-Instruct-IQ4_XS.gguf",
    marks=[pytest.mark.quant_model],
)

QWEN2_CONFIG = GGUFTestConfig(
    original_model="Qwen/Qwen2.5-1.5B-Instruct",
    gguf_repo="Qwen/Qwen2.5-1.5B-Instruct-GGUF",
    gguf_filename="qwen2.5-1.5b-instruct-q6_k.gguf",
)

PHI3_CONFIG = GGUFTestConfig(
    original_model="microsoft/Phi-3.5-mini-instruct",
    gguf_repo="bartowski/Phi-3.5-mini-instruct-GGUF",
    gguf_filename="Phi-3.5-mini-instruct-IQ4_XS.gguf",
)

GPT2_CONFIG = GGUFTestConfig(
    original_model="openai-community/gpt2-large",
    gguf_repo="QuantFactory/gpt2-large-GGUF",
    gguf_filename="gpt2-large.Q4_K_M.gguf",
)

STABLELM_CONFIG = GGUFTestConfig(
    original_model="stabilityai/stablelm-3b-4e1t",
    gguf_repo="afrideva/stablelm-3b-4e1t-GGUF",
    gguf_filename="stablelm-3b-4e1t.q4_k_m.gguf",
)

STARCODER_CONFIG = GGUFTestConfig(
    original_model="bigcode/starcoder2-3b",
    gguf_repo="QuantFactory/starcoder2-3b-GGUF",
    gguf_filename="starcoder2-3b.Q6_K.gguf",
)

DOLPHIN_CONFIG = GGUFTestConfig(
    # Test VocabParallelEmbedding sharding issue.
    original_model="cognitivecomputations/TinyDolphin-2.8-1.1b",
    gguf_repo="tsunemoto/TinyDolphin-2.8-1.1b-GGUF",
    gguf_filename="tinydolphin-2.8-1.1b.Q6_K.gguf",
)

MODELS = [
    LLAMA_CONFIG, QWEN2_CONFIG, PHI3_CONFIG, GPT2_CONFIG, STABLELM_CONFIG,
    DOLPHIN_CONFIG,
    # STARCODER_CONFIG,  # broken
]

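
# Shared helper: generate greedy logprobs with the GGUF checkpoint and with
# the original unquantized checkpoint, then assert the two stay close.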
def check_model_outputs(
    vllm_runner: type[VllmRunner],
    prompts: list[str],
    model: GGUFTestConfig,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tp_size: int,
):
    tokenizer = AutoTokenizer.from_pretrained(model.original_model)
    if tokenizer.chat_template is not None:
        messages = [[{
            'role': 'user',
            'content': prompt
        }] for prompt in prompts]
        prompts = tokenizer.apply_chat_template(messages,
                                                tokenize=False,
                                                add_generation_prompt=True)

    # Run gguf model.
    with vllm_runner(model_name=model.gguf_model,
                     enforce_eager=True,
                     tokenizer_name=model.original_model,
                     dtype=dtype,
                     max_model_len=MAX_MODEL_LEN,
                     tensor_parallel_size=tp_size) as gguf_model:
        gguf_outputs = gguf_model.generate_greedy_logprobs(
            prompts[:-1], max_tokens, num_logprobs)

    # Run unquantized model.
    # Should run with tp=1, otherwise the test will get stuck at
    # nccl initialization.
    with vllm_runner(
            model_name=model.original_model,
            enforce_eager=True,  # faster tests
            dtype=dtype,
            max_model_len=MAX_MODEL_LEN,
            tensor_parallel_size=1) as original_model:
        original_outputs = original_model.generate_greedy_logprobs(
            prompts[:-1], max_tokens, num_logprobs)

    check_logprobs_close(
        outputs_0_lst=original_outputs,
        outputs_1_lst=gguf_outputs,
        name_0="original",
        name_1="gguf",
    )


@pytest.mark.skipif(not is_quant_method_supported("gguf"),
                    reason="gguf is not supported on this GPU type.")
@pytest.mark.parametrize("model", [
    pytest.param(test_config, marks=test_config.marks)
    for test_config in MODELS
])
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("num_logprobs", [5])
@pytest.mark.parametrize("tp_size", [1])
def test_models(
    vllm_runner: type[VllmRunner],
    example_prompts: list[str],
    model: GGUFTestConfig,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tp_size: int,
) -> None:
    check_model_outputs(vllm_runner, example_prompts, model, dtype, max_tokens,
                        num_logprobs, tp_size)

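
# Same comparison as test_models, but loads the GGUF checkpoint with tensor
# parallelism across two GPUs.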
@pytest.mark.skipif(not is_quant_method_supported("gguf"),
                    reason="gguf is not supported on this GPU type.")
@pytest.mark.parametrize("model", [LLAMA_CONFIG])
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [8])
@pytest.mark.parametrize("num_logprobs", [5])
@pytest.mark.parametrize("tp_size", [2])
@multi_gpu_test(num_gpus=2)
def test_distributed(
    vllm_runner: type[VllmRunner],
    example_prompts: list[str],
    model: GGUFTestConfig,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tp_size: int,
) -> None:
    check_model_outputs(vllm_runner, example_prompts, model, dtype, max_tokens,
                        num_logprobs, tp_size)