# SPDX-License-Identifier: Apache-2.0
"""Test the different finish_reason="stop" situations during generation:
    1. One of the provided stop strings
    2. One of the provided stop tokens
    3. The EOS token

Run `pytest tests/engine/test_stop_reason.py`.
"""

import pytest
import transformers

from vllm import SamplingParams

MODEL = "distilbert/distilgpt2"
STOP_STR = "."
SEED = 42
MAX_TOKENS = 1024
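# MAX_TOKENS is kept large so that a stop condition, rather than the length
# cap, should normally end generation; the EOS case below accepts "length".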
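

# `vllm_runner` (and `example_prompts` used below) are fixtures provided by
# the vLLM test suite's shared conftest.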
@pytest.fixture
def vllm_model(vllm_runner):
    with vllm_runner(MODEL) as vllm_model:
        yield vllm_model


def test_stop_reason(vllm_model, example_prompts):
    tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL)
    stop_token_id = tokenizer.convert_tokens_to_ids(STOP_STR)
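    # Note: "." is a single token in the GPT-2 vocabulary, so the stop string
    # doubles as a valid stop token id.

    # vllm_model.model is the underlying vllm.LLM instance created by the
    # vllm_runner fixture.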
    llm = vllm_model.model

    # Case: explicit stop token id. ignore_eos=True so that only the stop
    # token (or the length cap) can end generation.
    outputs = llm.generate(example_prompts,
                           sampling_params=SamplingParams(
                               ignore_eos=True,
                               seed=SEED,
                               max_tokens=MAX_TOKENS,
                               stop_token_ids=[stop_token_id]))
    for output in outputs:
        output = output.outputs[0]
        assert output.finish_reason == "stop"
        assert output.stop_reason == stop_token_id

    # Case: stop string. A match is reported via stop_reason as the string
    # itself.
    outputs = llm.generate(example_prompts,
                           sampling_params=SamplingParams(
                               ignore_eos=True,
                               seed=SEED,
                               max_tokens=MAX_TOKENS,
                               stop=STOP_STR))
    for output in outputs:
        output = output.outputs[0]
        assert output.finish_reason == "stop"
        assert output.stop_reason == STOP_STR

    # Case: EOS token. No explicit stops and ignore_eos left at its default,
    # so generation ends either on EOS ("stop" with stop_reason None) or on
    # the length cap ("length").
    outputs = llm.generate(example_prompts,
                           sampling_params=SamplingParams(
                               seed=SEED, max_tokens=MAX_TOKENS))
    for output in outputs:
        output = output.outputs[0]
        assert output.finish_reason == "length" or (
            output.finish_reason == "stop" and output.stop_reason is None)