
This PR removes block manager v1. It is the first piece of the prefix-caching-centric design: to get there, we simplify the code path so that only the v2 block manager (which has much higher prefix-caching performance) is used.
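For context, a minimal usage sketch (model name and prompts are illustrative): with v1 gone, enabling prefix caching via the `enable_prefix_caching` engine argument always exercises the v2 path.

from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m", enable_prefix_caching=True)
params = SamplingParams(temperature=0.0, max_tokens=5)

shared_prefix = "The quick brown fox jumps over the lazy dog. "
# The first call populates the prefix cache; the second reuses the
# cached KV blocks for the shared prefix instead of recomputing them.
llm.generate([shared_prefix + "Tell me more."], params)
llm.generate([shared_prefix + "Summarize this."], params)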
"""Compare the with and without prefix caching.
|
|
|
|
Run `pytest tests/prefix_caching/test_prefix_caching.py`.
|
|
"""
|
|
import pytest
|
|
|
|
from tests.kernels.utils import override_backend_env_variable
|
|
|
|
from ..models.utils import check_outputs_equal
|
|
|
|
MODELS = [
|
|
"facebook/opt-125m",
|
|
]
|
|
|
|
|
|
@pytest.mark.parametrize("model", MODELS)
|
|
@pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER", "XFORMERS"])
|
|
@pytest.mark.parametrize("dtype", ["half"])
|
|
@pytest.mark.parametrize("max_tokens", [5])
|
|
@pytest.mark.parametrize("cached_position", [0, 1])
|
|
def test_mixed_requests(
|
|
hf_runner,
|
|
vllm_runner,
|
|
example_prompts,
|
|
model: str,
|
|
backend: str,
|
|
dtype: str,
|
|
max_tokens: int,
|
|
cached_position: int,
|
|
monkeypatch,
|
|
) -> None:
|
|
"""
|
|
Test the case when some sequences have the prefix cache hit
|
|
and the others don't. The cached position determines where
|
|
the sequence is at among the batch of prefills.
|
|
"""
|
|
override_backend_env_variable(monkeypatch, backend)
|
|
|
|
with hf_runner(model, dtype=dtype) as hf_model:
|
|
hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
|
|
|
|
cached_prompt = example_prompts[cached_position]
|
|
with vllm_runner(
|
|
model,
|
|
dtype=dtype,
|
|
enable_prefix_caching=True,
|
|
) as vllm_model:
|
|
# Run the first prompt so the cache is populated
|
|
vllm_outputs = vllm_model.generate_greedy([cached_prompt], max_tokens)
|
|
|
|
# Run all the promopts
|
|
vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
|
|
|
|
check_outputs_equal(
|
|
outputs_0_lst=hf_outputs,
|
|
outputs_1_lst=vllm_outputs,
|
|
name_0="hf",
|
|
name_1="vllm",
|
|
)
|
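For intuition on why a prefix-caching-centric block manager can reuse KV blocks across requests, here is a minimal sketch (not vLLM's actual implementation; names and block size are illustrative) of a content-addressed block table: each fixed-size block of token ids is keyed by a hash of its own tokens chained with the hash of everything before it, so equal keys imply equal prefixes.

BLOCK_SIZE = 16

def block_keys(token_ids: list[int]) -> list[int]:
    """Return one hash key per full block, chaining in the prefix hash."""
    keys: list[int] = []
    prefix_hash = 0
    for start in range(0, len(token_ids) - BLOCK_SIZE + 1, BLOCK_SIZE):
        block = tuple(token_ids[start:start + BLOCK_SIZE])
        # A block's key depends on its tokens and on everything before it.
        prefix_hash = hash((prefix_hash, block))
        keys.append(prefix_hash)
    return keys

# Two prompts sharing a 16-token prefix share the first block key, so the
# KV-cache blocks stored under that key can be reused instead of recomputed.
a = block_keys(list(range(32)))
b = block_keys(list(range(16)) + list(range(100, 116)))
assert a[0] == b[0] and a[1] != b[1]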