diff --git a/tests/samplers/test_ignore_eos.py b/tests/samplers/test_ignore_eos.py
index 864657a3..67b5168b 100644
--- a/tests/samplers/test_ignore_eos.py
+++ b/tests/samplers/test_ignore_eos.py
@@ -7,25 +7,26 @@ import pytest
 
 from vllm import SamplingParams
 
-MODELS = ["facebook/opt-125m"]
+# We also test with llama because it has generation_config to specify EOS
+# (past regression).
+MODELS = ["facebook/opt-125m", "meta-llama/Llama-2-7b-hf"]
 
 
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["half"])
-@pytest.mark.parametrize("max_tokens", [1024])
-def test_beam_search_single_input(
+@pytest.mark.parametrize("max_tokens", [512])
+def test_ignore_eos(
     vllm_runner,
     example_prompts,
     model: str,
     dtype: str,
     max_tokens: int,
 ) -> None:
-    example_prompts = "1 + 1 is"
-
     vllm_model = vllm_runner(model, dtype=dtype)
     sampling_params = SamplingParams(max_tokens=max_tokens, ignore_eos=True)
-    ignore_eos_output = vllm_model.model.generate(
-        example_prompts, sampling_params=sampling_params)
-    print(len(ignore_eos_output[0].outputs[0].token_ids))
-    assert max_tokens - len(ignore_eos_output[0].outputs[0].token_ids) < 10
-    assert max_tokens - len(ignore_eos_output[0].outputs[0].token_ids) >= 0
+
+    for prompt in example_prompts:
+        ignore_eos_output = vllm_model.model.generate(
+            prompt, sampling_params=sampling_params)
+        output_length = len(ignore_eos_output[0].outputs[0].token_ids)
+        assert output_length == max_tokens
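
For reference, the behavior the updated test asserts can be reproduced outside the test suite; `vllm_runner` and `example_prompts` are pytest fixtures that only exist inside vLLM's tests. A minimal standalone sketch, assuming a locally available facebook/opt-125m checkpoint and an arbitrary placeholder prompt:

    from vllm import LLM, SamplingParams

    # With ignore_eos=True the model keeps generating past any EOS token,
    # so each output should contain exactly max_tokens tokens.
    llm = LLM(model="facebook/opt-125m")
    sampling_params = SamplingParams(max_tokens=512, ignore_eos=True)

    outputs = llm.generate(["1 + 1 is"], sampling_params)
    for output in outputs:
        assert len(output.outputs[0].token_ids) == 512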