"""Tests whether FP8 computation is enabled correctly.
|
|
|
|
|
|
|
|
Run `pytest tests/quantization/test_fp8.py --forked`.
|
|
|
|
"""
import pytest
import torch

from tests.quantization.utils import is_quant_method_supported
from vllm.model_executor.layers.quantization.fp8 import Fp8LinearMethod


@pytest.mark.skipif(not is_quant_method_supported("fp8"),
                    reason="FP8 is not supported on this GPU type.")
def test_load_fp16_model(vllm_runner) -> None:
    with vllm_runner("facebook/opt-125m", quantization="fp8") as llm:
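        # Loading an FP16 checkpoint with quantization="fp8" should quantize
        # the linear weights to FP8 at load time; inspect the first decoder
        # layer to confirm.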
        model = llm.model.llm_engine.model_executor.driver_worker.model_runner.model  # noqa: E501
        fc1 = model.model.decoder.layers[0].fc1
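        # The layer should use Fp8LinearMethod and store its weight in the
        # FP8 e4m3 format.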
        assert isinstance(fc1.quant_method, Fp8LinearMethod)
        assert fc1.weight.dtype == torch.float8_e4m3fn
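

# A minimal follow-up sketch (not part of the original test): assuming the
# vllm_runner fixture exposes generate_greedy(), as other vLLM quantization
# tests do, run a short greedy generation to check that the FP8 path executes.
@pytest.mark.skipif(not is_quant_method_supported("fp8"),
                    reason="FP8 is not supported on this GPU type.")
def test_fp8_model_generates(vllm_runner) -> None:
    with vllm_runner("facebook/opt-125m", quantization="fp8") as llm:
        outputs = llm.generate_greedy(["Hello, my name is"], max_tokens=8)
        # generate_greedy is assumed to return (token_ids, text) pairs;
        # the generated text should be non-empty.
        assert outputs[0][1]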