# SPDX-License-Identifier: Apache-2.0
# flake8: noqa
"""Tests fp8 models against ground truth generation
|
|
|
|
Note: these tests will only pass on L4 GPU.
|
|
|
|
"""
import os
from typing import Optional

import pytest

from tests.kernels.utils import override_backend_env_variable
from tests.quantization.utils import is_quant_method_supported

from ...utils import check_logprobs_close
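# Setting this explicitly keeps HF tokenizers parallelism enabled and avoids
# the tokenizers fork warning in multi-process runs.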
os.environ["TOKENIZERS_PARALLELISM"] = "true"
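# Each case pairs a baseline checkpoint with the checkpoint and KV-cache dtype
# under test; the baseline run below always uses kv_cache_dtype="auto".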
@pytest.mark.quant_model
@pytest.mark.skipif(not is_quant_method_supported("fp8"),
                    reason="fp8 is not supported on this GPU type.")
@pytest.mark.parametrize(
    "kv_cache_dtype,base_model,test_model",
    [
        # Test FP8 checkpoint w. fp8_e4m3 kv-cache scaling factors.
        ("fp8_e4m3", "meta-llama/Llama-3.2-1B-Instruct",
         "nm-testing/Llama-3.2-1B-Instruct-FP8-KV"),
        # Test BF16 checkpoint w. fp8_e5m2 kv-cache.
        ("fp8_e5m2", "meta-llama/Llama-3.2-1B-Instruct",
         "meta-llama/Llama-3.2-1B-Instruct"),
        # Test BF16 checkpoint w. fp8_e4m3 kv-cache scaling factors in json.
        ("fp8_e4m3", "meta-llama/Llama-3.2-1B-Instruct",
         "meta-llama/Llama-3.2-1B-Instruct")
    ])
# Due to low-precision numerical divergence, we only check the logprobs of
# 4 generated tokens.
@pytest.mark.parametrize("max_tokens", [4])
@pytest.mark.parametrize("enforce_eager", [True])
@pytest.mark.parametrize("backend", ["FLASH_ATTN", "XFORMERS", "FLASHINFER"])
# NOTE: Increasing this in this suite will fail CI because we currently cannot
# reset the distributed env properly. Use a value > 1 only when testing
# locally.
@pytest.mark.parametrize("tensor_parallel_size", [1])
# Due to low-precision numerical divergence, this test is too sensitive for
# the async postprocessor.
@pytest.mark.parametrize("disable_async_output_proc", [True])
def test_models(
    vllm_runner,
    example_prompts,
    kv_cache_dtype: str,
    base_model: str,
    test_model: str,
    max_tokens: int,
    enforce_eager: bool,
    backend: str,
    tensor_parallel_size: int,
    disable_async_output_proc: bool,
    monkeypatch,
) -> None:
    """
    Only checks that the log probs match, to cover the discrepancy in
    numerically sensitive kernels.
    """
    # Pin the requested attention backend for this test via the environment.
    override_backend_env_variable(monkeypatch, backend)

    MAX_MODEL_LEN = 1024
    NUM_LOG_PROBS = 8
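    # Baseline: run the base checkpoint with the default ("auto") KV-cache
    # dtype to produce reference logprobs.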
    with vllm_runner(
            base_model,
            max_model_len=MAX_MODEL_LEN,
            tensor_parallel_size=tensor_parallel_size,
            enforce_eager=enforce_eager,
            kv_cache_dtype="auto",
            disable_async_output_proc=disable_async_output_proc,
    ) as vllm_model:
        baseline_outputs = vllm_model.generate_greedy_logprobs(
            example_prompts, max_tokens, NUM_LOG_PROBS)
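    # Rerun with the checkpoint and KV-cache dtype under test.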
    with vllm_runner(
            test_model,
            max_model_len=MAX_MODEL_LEN,
            tensor_parallel_size=tensor_parallel_size,
            enforce_eager=enforce_eager,
            kv_cache_dtype=kv_cache_dtype,
            disable_async_output_proc=disable_async_output_proc,
    ) as vllm_model:
        test_outputs = vllm_model.generate_greedy_logprobs(
            example_prompts, max_tokens, NUM_LOG_PROBS)
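    # The two runs are accepted as matching if, wherever the greedy tokens
    # differ, each run's token still appears in the other run's top logprobs.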
    check_logprobs_close(
        outputs_0_lst=baseline_outputs,
        outputs_1_lst=test_outputs,
        name_0="fp16_kv_cache",
        name_1="fp8_kv_cache",
    )