[CI/Build] Added E2E Test For Compressed Tensors (#5839)

Co-authored-by: Michael Goin <michael@neuralmagic.com>
Co-authored-by: Robert Shaw <rshaw@neuralmagic>

commit 8dbfcd35bf (parent f7dac83d95)
@@ -14,6 +14,8 @@ peft
 requests
 ray
 sentence-transformers # required for embedding
+sparseml==1.8.0 # required for compressed-tensors
+compressed-tensors==0.4.0 # required for compressed-tensors
 
 # Benchmarking
 aiohttp
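Since both pins must resolve together, here is a quick hedged sanity check for the test environment (standard-library importlib.metadata; the distribution names are assumed to match the requirement names above):

from importlib.metadata import version

# Pins from the requirements hunk above.
assert version("sparseml") == "1.8.0"
assert version("compressed-tensors") == "0.4.0"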
@@ -176,6 +176,7 @@ class HfRunner:
         model_kwargs: Optional[Dict[str, Any]] = None,
         is_embedding_model: bool = False,
         is_vision_model: bool = False,
+        is_sparseml_model: bool = False,
     ) -> None:
         assert dtype in _STR_DTYPE_TO_TORCH_DTYPE
         torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]

@@ -193,6 +194,9 @@ class HfRunner:
         else:
             if is_vision_model:
                 auto_cls = AutoModelForVision2Seq
+            elif is_sparseml_model:
+                from sparseml.transformers import SparseAutoModelForCausalLM
+                auto_cls = SparseAutoModelForCausalLM
             else:
                 auto_cls = AutoModelForCausalLM
 
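For illustration, the class dispatch added above reduces to the standalone sketch below (pick_auto_cls is a hypothetical helper, not part of HfRunner; note the deferred import, which means sparseml is only required when the flag is actually set):

from transformers import AutoModelForCausalLM, AutoModelForVision2Seq

def pick_auto_cls(is_vision_model: bool = False,
                  is_sparseml_model: bool = False):
    # Mirrors the branch added to HfRunner in the hunks above.
    if is_vision_model:
        return AutoModelForVision2Seq
    if is_sparseml_model:
        # Deferred import: sparseml is only needed on this path.
        from sparseml.transformers import SparseAutoModelForCausalLM
        return SparseAutoModelForCausalLM
    return AutoModelForCausalLM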
tests/models/test_compressed_tensors.py (new file, 49 lines)
@@ -0,0 +1,49 @@
+"""Compares vllm vs sparseml for compressed-tensors
+
+Note: vllm and sparseml do not have bitwise correctness,
+so in this test, we just confirm that the top selected
+tokens of each are in the top 5 selections of the other.
+"""
+
+import pytest
+
+from tests.quantization.utils import is_quant_method_supported
+
+from .utils import check_logprobs_close
+
+MODELS = [
+    "nm-testing/Meta-Llama-3-8B-Instruct-W8-Channel-A8-Dynamic-Per-Token-Test",
+]
+
+MAX_TOKENS = 32
+NUM_LOGPROBS = 5
+
+
+@pytest.mark.skipif(
+    not is_quant_method_supported("compressed-tensors"),
+    reason="compressed-tensors is not supported on this machine type.")
+@pytest.mark.parametrize("model_name", MODELS)
+def test_models(
+    vllm_runner,
+    hf_runner,
+    example_prompts,
+    model_name,
+) -> None:
+    # Run sparseml.
+    with hf_runner(model_name=model_name,
+                   is_sparseml_model=True) as sparseml_model:
+
+        sparseml_outputs = sparseml_model.generate_greedy_logprobs_limit(
+            example_prompts, MAX_TOKENS, NUM_LOGPROBS)
+
+    # Run vllm.
+    with vllm_runner(model_name=model_name) as vllm_model:
+        vllm_outputs = vllm_model.generate_greedy_logprobs(
+            example_prompts, MAX_TOKENS, NUM_LOGPROBS)
+
+    check_logprobs_close(
+        outputs_0_lst=sparseml_outputs,
+        outputs_1_lst=vllm_outputs,
+        name_0="sparseml",
+        name_1="vllm",
+    )
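Conceptually, the final check amounts to something like the sketch below (a simplified reduction of check_logprobs_close, assuming each logprobs entry maps candidate token ids to logprobs per generation step; the real helper's exact behavior may differ):

def tokens_agree_within_top_k(tokens_0, logprobs_0, tokens_1, logprobs_1):
    """Return True if, wherever the two generations pick different
    tokens, each model's pick still appears in the other's top-k set."""
    for step, (tok_0, tok_1) in enumerate(zip(tokens_0, tokens_1)):
        if tok_0 == tok_1:
            continue
        # logprobs_x[step] is a dict: candidate token id -> logprob.
        if tok_0 not in logprobs_1[step] or tok_1 not in logprobs_0[step]:
            return False
    return True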
@@ -34,7 +34,8 @@ class CompressedTensorsConfig(QuantizationConfig):
         return [torch.float16, torch.bfloat16]
 
     # Need to figure it out
-    def get_min_capability(self) -> int:
+    @classmethod
+    def get_min_capability(cls) -> int:
         return 60
 
     def get_name(self) -> str:
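For context, the 60 here encodes CUDA compute capability 6.0 (Pascal); making the method a classmethod lets the threshold be read without instantiating the config. A hedged sketch of how such a value can be compared against the current device (the helper name is illustrative, not vLLM's API):

import torch

def device_meets_capability(min_capability: int = 60) -> bool:
    if not torch.cuda.is_available():
        return False
    # get_device_capability() returns e.g. (8, 6) for an Ampere GPU;
    # fold it into the same two-digit encoding used above.
    major, minor = torch.cuda.get_device_capability()
    return major * 10 + minor >= min_capability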