import openai  # use the official client for correctness check
import pytest
# using Ray for overall ease of process management, parallel requests,
# and debugging.
import ray
# downloading lora to test lora requests
from huggingface_hub import snapshot_download

from ...utils import VLLM_PATH, RemoteOpenAIServer

# any model with a chat template should work here
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
# technically this needs Mistral-7B-v0.1 as base, but we're not testing
# generation quality here
LORA_NAME = "typeof/zephyr-7b-beta-lora"


@pytest.fixture(scope="module")
def zephyr_lora_files():
    return snapshot_download(repo_id=LORA_NAME)


@pytest.fixture(scope="module")
def ray_ctx():
    ray.init(runtime_env={"working_dir": VLLM_PATH})
    yield
    ray.shutdown()


@pytest.fixture(scope="module")
def server(zephyr_lora_files, ray_ctx):
    return RemoteOpenAIServer([
        "--model",
        MODEL_NAME,
        # use half precision for speed and memory savings in CI environment
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "8192",
        "--enforce-eager",
        # lora config below
        "--enable-lora",
        "--lora-modules",
        f"zephyr-lora={zephyr_lora_files}",
        f"zephyr-lora2={zephyr_lora_files}",
        "--max-lora-rank",
        "64",
        "--max-cpu-loras",
        "2",
        "--max-num-seqs",
        "128",
    ])


@pytest.fixture(scope="module")
def client(server):
    return server.get_async_client()


@pytest.mark.asyncio
async def test_check_models(client: openai.AsyncOpenAI):
    models = await client.models.list()
    models = models.data
    served_model = models[0]
    lora_models = models[1:]
    assert served_model.id == MODEL_NAME
    assert all(model.root == MODEL_NAME for model in models)
    assert lora_models[0].id == "zephyr-lora"
    assert lora_models[1].id == "zephyr-lora2"
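

# Illustrative sketch, not from the original file: exercise the LoRA modules
# registered above through the OpenAI-compatible completions endpoint, assuming
# the server exposes them under the names passed to --lora-modules. The prompt,
# max_tokens, and temperature values are arbitrary choices for a quick smoke
# check, not an assertion about generation quality.
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", ["zephyr-lora", "zephyr-lora2"])
async def test_lora_completion_sketch(client: openai.AsyncOpenAI,
                                      model_name: str):
    completion = await client.completions.create(
        model=model_name,
        prompt="Hello, my name is",
        max_tokens=5,
        temperature=0.0,
    )
    # only check that the adapter answers with some text
    assert completion.id is not None
    assert len(completion.choices) == 1
    assert len(completion.choices[0].text) >= 1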