# SPDX-License-Identifier: Apache-2.0
"""Make sure ignore_eos works.
|
|
|
|
|
|
|
|
Run `pytest tests/samplers/test_ignore_eos.py`.
|
|
|
|
"""
import pytest

from vllm import SamplingParams

@pytest.fixture(autouse=True)
def v1(run_with_both_engines):
    """Autouse fixture: execute every test in this module with both engines."""
# Llama is included on top of distilgpt2 because it carries a
# generation_config that specifies EOS — a past regression case.
MODELS = ["distilbert/distilgpt2", "meta-llama/Llama-3.2-1B"]
@pytest.mark.parametrize("model", MODELS)
|
|
|
|
@pytest.mark.parametrize("dtype", ["half"])
|
2024-06-01 21:21:53 -05:00
|
|
|
@pytest.mark.parametrize("max_tokens", [512])
|
|
|
|
def test_ignore_eos(
|
2024-05-01 21:45:42 +09:00
|
|
|
vllm_runner,
|
|
|
|
example_prompts,
|
|
|
|
model: str,
|
|
|
|
dtype: str,
|
|
|
|
max_tokens: int,
|
|
|
|
) -> None:
|
2024-06-08 01:59:20 -07:00
|
|
|
with vllm_runner(model, dtype=dtype) as vllm_model:
|
|
|
|
sampling_params = SamplingParams(max_tokens=max_tokens,
|
|
|
|
ignore_eos=True)
|
2024-06-01 21:21:53 -05:00
|
|
|
|
2024-06-08 01:59:20 -07:00
|
|
|
for prompt in example_prompts:
|
|
|
|
ignore_eos_output = vllm_model.model.generate(
|
|
|
|
prompt, sampling_params=sampling_params)
|
|
|
|
output_length = len(ignore_eos_output[0].outputs[0].token_ids)
|
|
|
|
assert output_length == max_tokens
|