2025-02-02 14:58:18 -05:00
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
|
|
|
|
2024-08-23 10:12:44 -03:00
|
|
|
import pytest
|
|
|
|
|
|
|
|
from vllm import LLM
|
|
|
|
|
|
|
|
|
2024-11-06 12:57:35 -07:00
|
|
|
@pytest.fixture(autouse=True)
def v1(run_with_both_engines):
    """Autouse wrapper that runs every test in this module under both engines.

    Requesting the ``run_with_both_engines`` fixture is the whole job here;
    no body is needed. This can be promoted up to conftest.py to run for
    every test in a package.
    """
    pass
|
|
|
|
|
|
|
|
|
2024-08-23 10:12:44 -03:00
|
|
|
def test_empty_prompt():
    """An empty string prompt must be rejected with a clear ValueError."""
    model = LLM(model="openai-community/gpt2", enforce_eager=True)
    empty_prompts = [""]
    # generate() should refuse the empty decoder prompt up front rather
    # than silently producing output.
    with pytest.raises(ValueError, match='decoder prompt cannot be empty'):
        model.generate(empty_prompts)
|
2024-10-29 16:13:20 -05:00
|
|
|
|
|
|
|
|
2024-11-06 12:57:35 -07:00
|
|
|
@pytest.mark.skip_v1
def test_out_of_vocab_token():
    """A prompt token id outside the model vocabulary must raise ValueError."""
    model = LLM(model="openai-community/gpt2", enforce_eager=True)
    # 999999 is far beyond gpt2's vocabulary, so validation should reject it.
    bad_prompt = {"prompt_token_ids": [999999]}
    with pytest.raises(ValueError, match='out of vocabulary'):
        model.generate(bad_prompt)
|