# SPDX-License-Identifier: Apache-2.0

# Expanded quantized model tests for CPU offloading
# Base tests: tests/basic_correctness/test_cpu_offload.py
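#
# Example invocation (assuming this file lives at
# tests/quantization/test_cpu_offload.py; adjust the path as needed):
#     pytest -q tests/quantization/test_cpu_offload.py -k fp8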

import pytest

from tests.quantization.utils import is_quant_method_supported

from ..utils import compare_two_settings
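
# NOTE: compare_two_settings (imported above from ..utils) is assumed to start
# the server with each of the two CLI arg lists and assert that both settings
# produce matching outputs. Every test below pairs a baseline configuration
# with the same configuration plus "--cpu-offload-gb 1", which offloads about
# 1 GiB of model weights to CPU memory, so CPU offloading is expected to be
# numerically transparent for each quantization scheme.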


@pytest.mark.skipif(not is_quant_method_supported("fp8"),
                    reason="fp8 is not supported on this GPU type.")
def test_cpu_offload_fp8():
    # Test quantization of an unquantized checkpoint
    compare_two_settings("meta-llama/Llama-3.2-1B-Instruct",
                         ["--quantization", "fp8"],
                         ["--quantization", "fp8", "--cpu-offload-gb", "1"],
                         max_wait_seconds=480)
    # Test loading a quantized checkpoint
    compare_two_settings("neuralmagic/Qwen2-1.5B-Instruct-FP8", [],
                         ["--cpu-offload-gb", "1"],
                         max_wait_seconds=480)


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="gptq_marlin is not supported on this GPU type.")
def test_cpu_offload_gptq():
    # Test GPTQ Marlin
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4", [],
                         ["--cpu-offload-gb", "1"],
                         max_wait_seconds=480)
    # Test GPTQ
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4",
                         ["--quantization", "gptq"],
                         ["--quantization", "gptq", "--cpu-offload-gb", "1"],
                         max_wait_seconds=480)


@pytest.mark.skipif(not is_quant_method_supported("awq_marlin"),
                    reason="awq_marlin is not supported on this GPU type.")
def test_cpu_offload_awq():
    # Test AWQ Marlin
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-AWQ", [],
                         ["--cpu-offload-gb", "1"],
                         max_wait_seconds=480)
    # Test AWQ
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-AWQ",
                         ["--quantization", "awq"],
                         ["--quantization", "awq", "--cpu-offload-gb", "1"],
                         max_wait_seconds=480)


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="gptq_marlin is not supported on this GPU type.")
def test_cpu_offload_compressed_tensors():
    # Test wNa16
    compare_two_settings("nm-testing/tinyllama-oneshot-w4a16-channel-v2", [],
                         ["--cpu-offload-gb", "1"],
                         max_wait_seconds=480)
    # Test w4a16_marlin24
    compare_two_settings("nm-testing/llama7b-one-shot-2_4-w4a16-marlin24-t",
                         [], ["--cpu-offload-gb", "1"],
                         max_wait_seconds=480)
    # Test w8a8
    compare_two_settings(
        "nm-testing/tinyllama-oneshot-w8w8-test-static-shape-change", [],
        ["--cpu-offload-gb", "1"],
        max_wait_seconds=480)