2025-02-02 14:58:18 -05:00
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
|
|
|
|
2024-04-21 15:06:46 -07:00
|
|
|
import pytest
|
|
|
|
|
2025-02-18 23:34:59 -08:00
|
|
|
from vllm.config import LoadFormat
|
2024-04-21 15:06:46 -07:00
|
|
|
from vllm.entrypoints.llm import LLM
|
|
|
|
from vllm.sampling_params import SamplingParams
|
|
|
|
|
2025-02-18 23:34:59 -08:00
|
|
|
from ..conftest import MODEL_WEIGHTS_S3_BUCKET
|
2024-04-21 15:06:46 -07:00
|
|
|
|
2025-02-18 23:34:59 -08:00
|
|
|
|
|
|
|
@pytest.mark.parametrize("model", [f"{MODEL_WEIGHTS_S3_BUCKET}/distilgpt2"])
def test_skip_tokenizer_initialization(model: str):
    """Verify that ``skip_tokenizer_init=True`` really skips tokenizer setup.

    With no tokenizer/detokenizer initialized, the engine must reject plain
    text prompts, and generation from raw token ids must produce token ids
    with an empty decoded-text field.
    """
    engine = LLM(model=model,
                 skip_tokenizer_init=True,
                 load_format=LoadFormat.RUNAI_STREAMER)
    params = SamplingParams(prompt_logprobs=True, detokenize=True)

    # A text prompt cannot be tokenized without a tokenizer, so the engine
    # must refuse it with a ValueError.
    with pytest.raises(ValueError, match="cannot pass text prompts when"):
        engine.generate("abc", params)

    # Raw token ids bypass the tokenizer entirely and should succeed.
    results = engine.generate({"prompt_token_ids": [1, 2, 3]},
                              sampling_params=params)
    assert len(results) > 0
    generated = results[0].outputs
    assert len(generated) > 0
    # No detokenizer was initialized, so only token ids — never text —
    # can appear in the completion.
    assert generated[0].text == ""
    assert generated[0].token_ids
|