# SPDX-License-Identifier: Apache-2.0

import gc
import json
import os
import pathlib
import subprocess
from functools import partial
from unittest.mock import MagicMock, patch

import openai
import pytest
import torch
from huggingface_hub import snapshot_download

from vllm import SamplingParams
from vllm.engine.arg_utils import EngineArgs
# yapf conflicts with isort for this docstring
# yapf: disable
from vllm.model_executor.model_loader.tensorizer import (TensorizerConfig,
                                                         TensorSerializer,
                                                         is_vllm_tensorized,
                                                         load_with_tensorizer,
                                                         open_stream,
                                                         serialize_vllm_model,
                                                         tensorize_vllm_model)
# yapf: enable
from vllm.utils import PlaceholderModule, import_from_path

from ..utils import VLLM_PATH, RemoteOpenAIServer
from .conftest import retry_until_skip

try:
    from tensorizer import EncryptionParams
except ImportError:
    tensorizer = PlaceholderModule("tensorizer")  # type: ignore[assignment]
    EncryptionParams = tensorizer.placeholder_attr("EncryptionParams")

EXAMPLES_PATH = VLLM_PATH / "examples"

prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, seed=0)

model_ref = "facebook/opt-125m"
tensorize_model_for_testing_script = os.path.join(
    os.path.dirname(__file__), "tensorize_vllm_model_for_testing.py")


def is_curl_installed():
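    """Return whether the ``curl`` binary is available; tests marked with
    the matching skipif are skipped when it is not."""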
    try:
        subprocess.check_call(['curl', '--version'])
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False


def write_keyfile(keyfile_path: str):
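    """Generate a random tensorizer encryption key and write it to
    ``keyfile_path``, creating parent directories as needed."""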
    encryption_params = EncryptionParams.random()
    pathlib.Path(keyfile_path).parent.mkdir(parents=True, exist_ok=True)
    with open(keyfile_path, 'wb') as f:
        f.write(encryption_params.key)


@patch('vllm.model_executor.model_loader.tensorizer.TensorizerAgent')
def test_load_with_tensorizer(mock_agent, tensorizer_config):
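    """With TensorizerAgent mocked out, ``load_with_tensorizer`` should
    construct the agent from the given config and return its deserialized
    model."""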
    mock_linear_method = MagicMock()
    mock_agent_instance = mock_agent.return_value
    mock_agent_instance.deserialize.return_value = MagicMock()

    result = load_with_tensorizer(tensorizer_config,
                                  quant_method=mock_linear_method)

    mock_agent.assert_called_once_with(tensorizer_config,
                                       quant_method=mock_linear_method)
    mock_agent_instance.deserialize.assert_called_once()
    assert result == mock_agent_instance.deserialize.return_value


@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed")
def test_can_deserialize_s3(vllm_runner):
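    """Deserialize a pre-tensorized model straight from an S3 bucket and
    check that it can still generate."""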
    model_ref = "EleutherAI/pythia-1.4b"
    tensorized_path = f"s3://tensorized/{model_ref}/fp16/model.tensors"

    with vllm_runner(model_ref,
                     load_format="tensorizer",
                     model_loader_extra_config=TensorizerConfig(
                         tensorizer_uri=tensorized_path,
                         num_readers=1,
                         s3_endpoint="object.ord1.coreweave.com",
                     )) as loaded_hf_model:
        deserialized_outputs = loaded_hf_model.generate(
            prompts, sampling_params)

        assert deserialized_outputs


@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed")
def test_deserialized_encrypted_vllm_model_has_same_outputs(
        vllm_runner, tmp_path):
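    """Serialize a model with an encryption key, deserialize it with the
    same key, and check that the outputs match."""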
    with vllm_runner(model_ref) as vllm_model:
        model_path = tmp_path / (model_ref + ".tensors")
        key_path = tmp_path / (model_ref + ".key")
        write_keyfile(key_path)

        outputs = vllm_model.generate(prompts, sampling_params)

        config_for_serializing = TensorizerConfig(tensorizer_uri=model_path,
                                                  encryption_keyfile=key_path)

        vllm_model.apply_model(
            partial(serialize_vllm_model,
                    tensorizer_config=config_for_serializing))

    config_for_deserializing = TensorizerConfig(tensorizer_uri=model_path,
                                                encryption_keyfile=key_path)

    with vllm_runner(model_ref,
                     load_format="tensorizer",
                     model_loader_extra_config=config_for_deserializing
                     ) as loaded_vllm_model:  # noqa: E501
        deserialized_outputs = loaded_vllm_model.generate(
            prompts, sampling_params)

        assert outputs == deserialized_outputs


def test_deserialized_hf_model_has_same_outputs(hf_runner, vllm_runner,
                                                tmp_path):
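    """Serialize a plain Hugging Face model with ``TensorSerializer``, reload
    it through the tensorizer load format, and compare greedy outputs."""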
    with hf_runner(model_ref) as hf_model:
        model_path = tmp_path / (model_ref + ".tensors")
        max_tokens = 50
        outputs = hf_model.generate_greedy(prompts, max_tokens=max_tokens)
        with open_stream(model_path, "wb+") as stream:
            serializer = TensorSerializer(stream)
            serializer.write_module(hf_model.model)

    with vllm_runner(model_ref,
                     load_format="tensorizer",
                     model_loader_extra_config=TensorizerConfig(
                         tensorizer_uri=model_path,
                         num_readers=1,
                     )) as loaded_hf_model:
        deserialized_outputs = loaded_hf_model.generate_greedy(
            prompts, max_tokens=max_tokens)

        assert outputs == deserialized_outputs


def test_vllm_model_can_load_with_lora(vllm_runner, tmp_path):
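    """A model deserialized with tensorizer should still serve requests with
    LoRA adapters attached."""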
    multilora_inference = import_from_path(
        "examples.offline_inference.multilora_inference",
        EXAMPLES_PATH / "offline_inference/multilora_inference.py",
    )

    model_ref = "meta-llama/Llama-2-7b-hf"
    lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test")
    test_prompts = multilora_inference.create_test_prompts(lora_path)

    # Serialize model before deserializing and binding LoRA adapters
    with vllm_runner(model_ref) as vllm_model:
        model_path = tmp_path / (model_ref + ".tensors")

        vllm_model.apply_model(
            partial(
                serialize_vllm_model,
                tensorizer_config=TensorizerConfig(tensorizer_uri=model_path)))

    with vllm_runner(
            model_ref,
            load_format="tensorizer",
            model_loader_extra_config=TensorizerConfig(
                tensorizer_uri=model_path,
                num_readers=1,
            ),
            enable_lora=True,
            max_loras=1,
            max_lora_rank=8,
            max_cpu_loras=2,
            max_num_seqs=50,
            max_model_len=1000,
    ) as loaded_vllm_model:
        multilora_inference.process_requests(
            loaded_vllm_model.model.llm_engine, test_prompts)

        assert loaded_vllm_model


def test_load_without_tensorizer_load_format(vllm_runner):
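    """Passing a TensorizerConfig without ``load_format="tensorizer"`` should
    raise a ValueError."""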
|
2024-07-30 14:48:50 -04:00
|
|
|
model = None
|
2024-04-13 20:13:01 -04:00
|
|
|
with pytest.raises(ValueError):
|
2024-07-30 14:48:50 -04:00
|
|
|
model = vllm_runner(
|
2024-05-13 17:57:07 -04:00
|
|
|
model_ref,
|
|
|
|
model_loader_extra_config=TensorizerConfig(tensorizer_uri="test"))
|
2024-07-30 14:48:50 -04:00
|
|
|
del model
|
|
|
|
gc.collect()
|
|
|
|
torch.cuda.empty_cache()


@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed")
def test_openai_apiserver_with_tensorizer(vllm_runner, tmp_path):
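    """Serialize a model, then serve it via the OpenAI-compatible API server
    using ``--load-format tensorizer`` and validate a completion request."""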
|
2024-04-13 20:13:01 -04:00
|
|
|
## Serialize model
|
2025-03-19 13:49:33 +08:00
|
|
|
with vllm_runner(model_ref) as vllm_model:
|
2024-06-08 01:59:20 -07:00
|
|
|
model_path = tmp_path / (model_ref + ".tensors")
|
2024-04-13 20:13:01 -04:00
|
|
|
|
2025-01-20 15:00:59 +08:00
|
|
|
vllm_model.apply_model(
|
|
|
|
partial(
|
|
|
|
serialize_vllm_model,
|
|
|
|
tensorizer_config=TensorizerConfig(tensorizer_uri=model_path)))
|
2024-04-13 20:13:01 -04:00
|
|
|
|
2024-06-08 01:59:20 -07:00
|
|
|
model_loader_extra_config = {
|
|
|
|
"tensorizer_uri": str(model_path),
|
|
|
|
}
|
2024-05-13 17:57:07 -04:00
|
|
|
|
2024-04-13 20:13:01 -04:00
|
|
|
## Start OpenAI API server
|
|
|
|
openai_args = [
|
2024-11-15 17:34:17 +08:00
|
|
|
"--dtype",
|
|
|
|
"float16",
|
|
|
|
"--load-format",
|
|
|
|
"tensorizer",
|
|
|
|
"--model-loader-extra-config",
|
2024-06-14 02:21:53 +08:00
|
|
|
json.dumps(model_loader_extra_config),
|
2024-04-13 20:13:01 -04:00
|
|
|
]
|
|
|
|
|
2024-07-17 15:43:21 +08:00
|
|
|
with RemoteOpenAIServer(model_ref, openai_args) as server:
|
2024-07-12 21:51:48 -07:00
|
|
|
print("Server ready.")
|
2024-07-02 10:58:08 -07:00
|
|
|
|
2024-07-12 21:51:48 -07:00
|
|
|
client = server.get_client()
|
|
|
|
completion = client.completions.create(model=model_ref,
|
2024-07-30 14:48:50 -04:00
|
|
|
prompt="Hello, my name is",
|
|
|
|
max_tokens=5,
|
|
|
|
temperature=0.0)
|
2024-04-16 11:34:39 -07:00
|
|
|
|
2024-07-12 21:51:48 -07:00
|
|
|
assert completion.id is not None
|
|
|
|
assert len(completion.choices) == 1
|
|
|
|
assert len(completion.choices[0].text) >= 5
|
|
|
|
assert completion.choices[0].finish_reason == "length"
|
|
|
|
assert completion.usage == openai.types.CompletionUsage(
|
|
|
|
completion_tokens=5, prompt_tokens=6, total_tokens=11)


def test_raise_value_error_on_invalid_load_format(vllm_runner):
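    """Combining a TensorizerConfig with a conflicting load format
    ("safetensors") should raise a ValueError."""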
|
2024-07-30 14:48:50 -04:00
|
|
|
model = None
|
2024-04-13 20:13:01 -04:00
|
|
|
with pytest.raises(ValueError):
|
2024-07-30 14:48:50 -04:00
|
|
|
model = vllm_runner(
|
2024-05-13 17:57:07 -04:00
|
|
|
model_ref,
|
|
|
|
load_format="safetensors",
|
|
|
|
model_loader_extra_config=TensorizerConfig(tensorizer_uri="test"))
|
2024-07-30 14:48:50 -04:00
|
|
|
del model
|
|
|
|
gc.collect()
|
|
|
|
torch.cuda.empty_cache()


@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires 2 GPUs")
def test_tensorizer_with_tp_path_without_template(vllm_runner):
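    """Loading with ``tensor_parallel_size=2`` from a URI lacking a per-rank
    template (e.g. ``%02d``) should raise a ValueError."""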
    with pytest.raises(ValueError):
        model_ref = "EleutherAI/pythia-1.4b"
        tensorized_path = f"s3://tensorized/{model_ref}/fp16/model.tensors"

        vllm_runner(
            model_ref,
            load_format="tensorizer",
            model_loader_extra_config=TensorizerConfig(
                tensorizer_uri=tensorized_path,
                num_readers=1,
                s3_endpoint="object.ord1.coreweave.com",
            ),
            tensor_parallel_size=2,
            disable_custom_all_reduce=True,
        )


@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires 2 GPUs")
def test_deserialized_encrypted_vllm_model_with_tp_has_same_outputs(
        vllm_runner, tmp_path):
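    """Serialize an encrypted model across two tensor-parallel shards, then
    deserialize it and check that outputs match an unsharded baseline."""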
    model_ref = "EleutherAI/pythia-1.4b"
    # record outputs from un-sharded un-tensorized model
    with vllm_runner(
            model_ref,
            disable_custom_all_reduce=True,
            enforce_eager=True,
    ) as base_model:
        outputs = base_model.generate(prompts, sampling_params)
        base_model.model.llm_engine.model_executor.shutdown()

    # load model with two shards and serialize with encryption
    model_path = str(tmp_path / (model_ref + "-%02d.tensors"))
    key_path = tmp_path / (model_ref + ".key")

    tensorizer_config = TensorizerConfig(
        tensorizer_uri=model_path,
        encryption_keyfile=key_path,
    )

    tensorize_vllm_model(
        engine_args=EngineArgs(
            model=model_ref,
            tensor_parallel_size=2,
            disable_custom_all_reduce=True,
            enforce_eager=True,
        ),
        tensorizer_config=tensorizer_config,
    )
    assert os.path.isfile(model_path % 0), "Serialization subprocess failed"
    assert os.path.isfile(model_path % 1), "Serialization subprocess failed"

    with vllm_runner(
            model_ref,
            tensor_parallel_size=2,
            load_format="tensorizer",
            disable_custom_all_reduce=True,
            enforce_eager=True,
            model_loader_extra_config=tensorizer_config) as loaded_vllm_model:
        deserialized_outputs = loaded_vllm_model.generate(
            prompts, sampling_params)

        assert outputs == deserialized_outputs


@retry_until_skip(3)
def test_vllm_tensorized_model_has_same_outputs(vllm_runner, tmp_path):
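    """Round-trip a model through ``serialize_vllm_model``, confirm the
    artifact is detected by ``is_vllm_tensorized``, and compare outputs."""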
|
2024-07-30 14:48:50 -04:00
|
|
|
gc.collect()
|
|
|
|
torch.cuda.empty_cache()
|
2024-05-13 17:57:07 -04:00
|
|
|
model_ref = "facebook/opt-125m"
|
|
|
|
model_path = tmp_path / (model_ref + ".tensors")
|
|
|
|
config = TensorizerConfig(tensorizer_uri=str(model_path))
|
|
|
|
|
2024-06-08 01:59:20 -07:00
|
|
|
with vllm_runner(model_ref) as vllm_model:
|
|
|
|
outputs = vllm_model.generate(prompts, sampling_params)
|
2025-01-20 15:00:59 +08:00
|
|
|
|
|
|
|
vllm_model.apply_model(
|
|
|
|
partial(serialize_vllm_model, tensorizer_config=config))
|
2024-05-13 17:57:07 -04:00
|
|
|
|
2024-06-08 01:59:20 -07:00
|
|
|
assert is_vllm_tensorized(config)
|
2024-05-13 17:57:07 -04:00
|
|
|
|
2024-06-08 01:59:20 -07:00
|
|
|
with vllm_runner(model_ref,
|
2024-07-30 14:48:50 -04:00
|
|
|
load_format="tensorizer",
|
|
|
|
model_loader_extra_config=config) as loaded_vllm_model:
|
2024-11-15 17:34:17 +08:00
|
|
|
deserialized_outputs = loaded_vllm_model.generate(
|
|
|
|
prompts, sampling_params)
|
2024-07-30 14:48:50 -04:00
|
|
|
# noqa: E501
|
2024-05-13 17:57:07 -04:00
|
|
|
|
2024-06-08 01:59:20 -07:00
|
|
|
assert outputs == deserialized_outputs
|