"""Compare the outputs of a GPTQ model to a Marlin model.

Note: GPTQ and Marlin do not have bitwise correctness.
As a result, this test only confirms that the tokens selected by the
Marlin/GPTQ models appear in each other's top-5 logprobs.

Note: Marlin internally uses locks to synchronize its threads, which can
result in very slight nondeterminism. As a result, a failing run is retried
up to two more times before it is reported as a failure.

Run `pytest tests/models/test_marlin.py`.
"""
from dataclasses import dataclass

import pytest
import torch

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS

from .utils import check_logprobs_close

marlin_not_supported = True

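# Marlin kernels require a minimum GPU compute capability, so default to
# "not supported" and only flip the flag when a capable CUDA device is present.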
if torch.cuda.is_available():
    capability = torch.cuda.get_device_capability()
    capability = capability[0] * 10 + capability[1]
    marlin_not_supported = (
        capability < QUANTIZATION_METHODS["marlin"].get_min_capability())


@dataclass
class ModelPair:
    model_marlin: str
    model_gptq: str


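# Pairs of equivalent checkpoints: one serialized in Marlin format and one in
# GPTQ format, so their outputs should closely agree.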
model_pairs = [
    ModelPair(model_marlin="nm-testing/zephyr-beta-7b-marlin-g128",
              model_gptq="nm-testing/zephyr-beta-7b-gptq-g128"),
    ModelPair(model_marlin="robertgshaw2/zephyr-7b-beta-channelwise-marlin",
              model_gptq="robertgshaw2/zephyr-7b-beta-channelwise-gptq"),
    ModelPair(model_marlin="robertgshaw2/TinyLlama-1.1B-Chat-v1.0-g128-marlin",
              model_gptq="robertgshaw2/TinyLlama-1.1B-Chat-v1.0-g128-gptq")
]


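# Marlin can be slightly nondeterministic (see module docstring), so allow the
# test to be retried a couple of times before failing.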
@pytest.mark.flaky(reruns=2)
@pytest.mark.skipif(marlin_not_supported,
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("model_pair", model_pairs)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models(
    vllm_runner,
    example_prompts,
    model_pair: ModelPair,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
) -> None:
    with vllm_runner(model_pair.model_marlin,
                     dtype=dtype,
                     quantization="marlin") as marlin_model:
        marlin_outputs = marlin_model.generate_greedy_logprobs(
            example_prompts, max_tokens, num_logprobs)

    with vllm_runner(model_pair.model_gptq, dtype=dtype,
                     quantization="gptq") as gptq_model:
        gptq_outputs = gptq_model.generate_greedy_logprobs(
            example_prompts, max_tokens, num_logprobs)

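    # Greedy decoding can legitimately diverge between the two kernels, so
    # rather than requiring an exact match, check that whenever the selected
    # tokens differ, each model's token still appears in the other's
    # top-`num_logprobs` logprobs.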
    check_logprobs_close(
        outputs_0_lst=gptq_outputs,
        outputs_1_lst=marlin_outputs,
        name_0="gptq",
        name_1="marlin",
    )