# SPDX-License-Identifier: Apache-2.0

from typing import Optional

import pytest
import torch

from vllm.config import CacheConfig, ModelConfig, SchedulerConfig, VllmConfig
from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange
from vllm.sampling_params import SamplingParams
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheConfig,
                                        KVCacheGroupSpec)
from vllm.v1.outputs import ModelRunnerOutput
from vllm.v1.request import Request, RequestStatus
from vllm.v1.structured_output import StructuredOutputManager

EOS_TOKEN_ID = 50256
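# Note: 50256 happens to be the GPT-2 tokenizer's EOS id; the exact value is
# arbitrary for these tests because create_requests() below passes
# eos_token_id explicitly to every Request.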


def create_scheduler(
    model: str = "facebook/opt-125m",
    max_num_seqs: int = 16,
    max_num_batched_tokens: int = 8192,
    enable_prefix_caching: Optional[bool] = None,
    long_prefill_token_threshold: int = 0,
) -> Scheduler:
    '''Create scheduler under test.

    Args:
      model: model under test
      max_num_seqs: max sequences to schedule
      max_num_batched_tokens: max num tokens to batch
      enable_prefix_caching: optionally force APC config
                             (True/False) or use default
                             (None)
      long_prefill_token_threshold: cap on the number of prompt tokens
                                    scheduled per step for long requests
                                    (0 disables the cap)

    Returns:
      :class:`Scheduler` instance
    '''
    scheduler_config = SchedulerConfig(
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=max_num_batched_tokens,
        max_model_len=max_num_batched_tokens,
        long_prefill_token_threshold=long_prefill_token_threshold,
    )
    model_config = ModelConfig(
        model=model,
        task="auto",
        tokenizer=model,
        tokenizer_mode="auto",
        trust_remote_code=True,
        dtype="float16",
        seed=42,
    )
    # Cache config, optionally force APC
    kwargs_cache = ({} if enable_prefix_caching is None else {
        'enable_prefix_caching': enable_prefix_caching
    })
    cache_config = CacheConfig(
        block_size=16,
        gpu_memory_utilization=0.9,
        swap_space=0,
        cache_dtype="auto",
        **kwargs_cache,
    )
    vllm_config = VllmConfig(
        scheduler_config=scheduler_config,
        model_config=model_config,
        cache_config=cache_config,
    )
    kv_cache_config = KVCacheConfig(
        num_blocks=10000,  # A large number of blocks to hold all requests
        tensors={},
        kv_cache_groups=[
            KVCacheGroupSpec(['layer'],
                             FullAttentionSpec(16, 1, 1, torch.float32, False))
        ],
    )
    cache_config.num_gpu_blocks = 10000
    return Scheduler(
        scheduler_config,
        model_config,
        cache_config,
        lora_config=None,
        kv_cache_config=kv_cache_config,
        log_stats=True,
        structured_output_manager=StructuredOutputManager(vllm_config),
    )


def create_requests(num_requests: int,
                    num_tokens: int = 10,
                    mm_positions: Optional[list[PlaceholderRange]] = None,
                    max_tokens: int = 16,
                    stop_token_ids: Optional[list[int]] = None,
                    prompt_logprobs: Optional[int] = None):
    """Create dummy requests (synthetic prompts, shared SamplingParams) for
    the scheduler tests below."""
    sampling_params = SamplingParams(ignore_eos=False,
                                     max_tokens=max_tokens,
                                     stop_token_ids=stop_token_ids,
                                     prompt_logprobs=prompt_logprobs)
    requests = []
    for i in range(num_requests):
        if mm_positions is not None:
            mm_position = mm_positions[i]
            mm_inputs = [MultiModalKwargs({})] * len(mm_position)
        else:
            mm_position = None
            mm_inputs = None
        request = Request(
            request_id=f"{i}",
            prompt=None,
            prompt_token_ids=[i] * num_tokens,
            sampling_params=sampling_params,
            multi_modal_inputs=mm_inputs,
            multi_modal_placeholders=mm_position,
            multi_modal_hashes=None,
            eos_token_id=EOS_TOKEN_ID,
            arrival_time=0,
        )
        requests.append(request)
    return requests
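

# A minimal sketch of how the two helpers above are combined in the tests
# below (argument values are illustrative only):
#
#     scheduler = create_scheduler(max_num_batched_tokens=1024)
#     for request in create_requests(num_requests=3, num_tokens=800):
#         scheduler.add_request(request)
#     output = scheduler.schedule()
#     # ... build a ModelRunnerOutput by hand, then:
#     scheduler.update_from_output(output, model_runner_output)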


def test_add_requests():
    scheduler = create_scheduler()
    requests = create_requests(num_requests=10)

    for i, request in enumerate(requests):
        scheduler.add_request(request)
        assert request.request_id in scheduler.requests
        assert len(scheduler.waiting) == i + 1


def test_finish_request():
    scheduler = create_scheduler()
    requests = create_requests(num_requests=10)
    for request in requests:
        scheduler.add_request(request)

    for i, request in enumerate(requests):
        scheduler.finish_requests(request.request_id,
                                  RequestStatus.FINISHED_ABORTED)
        assert request.request_id not in scheduler.requests
        assert len(scheduler.waiting) == 9 - i


def test_get_num_unfinished_requests():
    scheduler = create_scheduler()
    requests = create_requests(num_requests=10)
    for request in requests:
        scheduler.add_request(request)

    for i, request in enumerate(requests):
        scheduler.finish_requests(request.request_id,
                                  RequestStatus.FINISHED_STOPPED)
        assert scheduler.get_num_unfinished_requests() == len(requests) - i - 1


@pytest.mark.parametrize("enable_prefix_caching, prompt_logprobs", [
    (None, None),
    (True, 5),
])
def test_schedule(enable_prefix_caching: Optional[bool],
                  prompt_logprobs: Optional[int]):
    '''Test scheduling.
    Two cases: default APC with no prompt logprobs, and APC enabled with
    prompt logprobs.
    '''
    scheduler = create_scheduler(enable_prefix_caching=enable_prefix_caching)
    requests = create_requests(num_requests=10,
                               prompt_logprobs=prompt_logprobs)
    for request in requests:
        scheduler.add_request(request)

    # Test initial scheduling
    output = scheduler.schedule()
    assert len(output.scheduled_new_reqs) == len(requests)
    assert len(output.scheduled_cached_reqs) == 0
    assert len(output.finished_req_ids) == 0
    # Verify all requests are scheduled.
    for req_id, num_tokens in output.num_scheduled_tokens.items():
        assert num_tokens == len(requests[int(req_id)].prompt_token_ids)

    # Verify requests moved from waiting to running
    assert len(scheduler.waiting) == 0
    assert len(scheduler.running) == len(requests)
    for i, request in enumerate(requests):
        assert scheduler.running[i] == request


def test_schedule_multimodal_requests():
    scheduler = create_scheduler(model="llava-hf/llava-1.5-7b-hf")
    mm_positions = [[PlaceholderRange(offset=i, length=100)]
                    for i in range(10)]
    requests = create_requests(
        num_requests=10,
        num_tokens=200,
        mm_positions=mm_positions,
    )
    for request in requests:
        scheduler.add_request(request)

    output = scheduler.schedule()
    assert len(output.scheduled_new_reqs) == len(requests)
    assert len(output.scheduled_cached_reqs) == 0
    assert len(output.finished_req_ids) == 0
    for req_id, num_tokens in output.num_scheduled_tokens.items():
        assert num_tokens == len(requests[int(req_id)].prompt_token_ids)
    assert len(output.scheduled_encoder_inputs) == 10
    for req_id, encoder_input in output.scheduled_encoder_inputs.items():
        assert len(encoder_input) == 1


def test_schedule_partial_requests():
    """Test scheduling behavior with partial requests.

    This test verifies that:
    1. The scheduler can handle multiple partial requests in a single step when
       constrained by encoder budget.
    2. A request in RUNNING state may be unscheduled in subsequent steps if
       there is insufficient encoder budget.
    """
    scheduler = create_scheduler(
        model="llava-hf/llava-1.5-7b-hf",
        max_num_batched_tokens=1024,
    )
    mm_positions = [[PlaceholderRange(offset=100, length=600)]
                    for _ in range(3)]
    requests = create_requests(
        num_requests=3,
        num_tokens=800,
        mm_positions=mm_positions,
    )
    for request in requests:
        scheduler.add_request(request)

    output = scheduler.schedule()
    assert len(output.scheduled_new_reqs) == 3
    assert len(output.scheduled_cached_reqs) == 0
    assert len(output.finished_req_ids) == 0

    assert scheduler.max_num_encoder_input_tokens == 1024
    # The first request is scheduled fully.
    assert output.num_scheduled_tokens[requests[0].request_id] == 800
    # The second request is scheduled partially.
    # The <img> tokens are not scheduled because of the encoder budget.
    assert output.num_scheduled_tokens[requests[1].request_id] == 100
    # The third request is also scheduled partially.
    # The <img> tokens are not scheduled because of the encoder budget.
    assert output.num_scheduled_tokens[requests[2].request_id] == 100
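
    # Why 800 / 100 / 100: each prompt is 800 tokens with a 600-token image
    # placeholder starting at offset 100. The encoder budget is 1024 (asserted
    # above), so once the first request's 600-token image is scheduled there
    # is not enough encoder budget left for another image; requests 1 and 2
    # can only run up to the start of their placeholders (token 100).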
    req_to_index = {
        request.request_id: i
        for i, request in enumerate(requests)
    }
    model_runner_output = ModelRunnerOutput(
        req_ids=[request.request_id for request in requests],
        req_id_to_index=req_to_index,
        # Only the first request has a sampled token id because
        # the other requests are still being prefilled.
        sampled_token_ids=[[0], [], []],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={},
    )
    scheduler.update_from_output(output, model_runner_output)

    # Schedule the next step.
    # Only the first and second requests are scheduled.
    # The third request is in the RUNNING state but not scheduled in this step
    # because of the encoder budget.
    output = scheduler.schedule()
    assert len(scheduler.running) == 3
    assert len(output.scheduled_new_reqs) == 0
    assert len(output.scheduled_cached_reqs) == 2
    assert len(output.finished_req_ids) == 0
    assert output.num_scheduled_tokens[requests[0].request_id] == 1
    assert output.num_scheduled_tokens[requests[1].request_id] == 700
    assert requests[2].request_id not in output.num_scheduled_tokens


@pytest.mark.parametrize("enable_prefix_caching", [True, False])
def test_schedule_concurrent_partial_requests(enable_prefix_caching: bool):
    """Test scheduling behavior with concurrent partial requests.

    This test verifies that multiple long prefill requests in the RUNNING
    state can be scheduled together.
    """
    scheduler = create_scheduler(
        model="facebook/opt-125m",
        max_num_batched_tokens=1024,
        long_prefill_token_threshold=400,
        enable_prefix_caching=enable_prefix_caching,
    )
    requests = create_requests(
        num_requests=3,
        num_tokens=800,
    )
    for request in requests:
        scheduler.add_request(request)

    output = scheduler.schedule()
    assert len(output.scheduled_new_reqs) == 3
    assert len(output.scheduled_cached_reqs) == 0
    assert len(output.finished_req_ids) == 0

    # The first request is scheduled partially - 400.
    assert output.num_scheduled_tokens[requests[0].request_id] == 400
    # The second request is scheduled partially - 400.
    assert output.num_scheduled_tokens[requests[1].request_id] == 400
    # The third request is also scheduled partially - 1024 - 400 - 400 = 224.
    assert output.num_scheduled_tokens[requests[2].request_id] == 224
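
    # Why 400 / 400 / 224: long_prefill_token_threshold=400 caps each long
    # prefill at 400 tokens per step, and the 1024-token batch budget leaves
    # only 1024 - 400 - 400 = 224 tokens for the third request.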
    req_to_index = {
        request.request_id: i
        for i, request in enumerate(requests)
    }
    model_runner_output = ModelRunnerOutput(
        req_ids=[request.request_id for request in requests],
        req_id_to_index=req_to_index,
        sampled_token_ids=[[] for _ in range(len(requests))],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={},
    )
    scheduler.update_from_output(output, model_runner_output)

    # Schedule the next step. All three requests are running.
    # The remaining prefill of the first and second requests is completed.
    output1 = scheduler.schedule()
    assert len(scheduler.running) == 3
    assert len(output1.scheduled_new_reqs) == 0
    assert len(output1.scheduled_cached_reqs) == 3
    assert len(output1.finished_req_ids) == 0
    assert output1.num_scheduled_tokens[requests[0].request_id] == 400
    assert output1.num_scheduled_tokens[requests[1].request_id] == 400
    assert output1.num_scheduled_tokens[requests[2].request_id] == 224

    # Schedule the third step. All three requests are running.
    # First and second requests are in the decode stage.
    # All the remaining tokens in the third request are processed.
    model_runner_output = ModelRunnerOutput(
        req_ids=[request.request_id for request in requests],
        req_id_to_index=req_to_index,
        sampled_token_ids=[[0], [0]] + [[] for _ in range(len(requests) - 2)],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={},
    )
    scheduler.update_from_output(output1, model_runner_output)
    output2 = scheduler.schedule()
    assert len(scheduler.running) == 3
    assert len(output2.scheduled_new_reqs) == 0
    assert len(output2.scheduled_cached_reqs) == 3
    assert len(output2.finished_req_ids) == 0
    assert output2.num_scheduled_tokens[requests[0].request_id] == 1
    assert output2.num_scheduled_tokens[requests[1].request_id] == 1
    assert output2.num_scheduled_tokens[
        requests[2].request_id] == 800 - 224 - 224


def test_stop_via_update_from_output():
    """Test stopping behavior through update_from_output"""
    scheduler = create_scheduler()

    # Test case 1: Stop on EOS token
    requests = create_requests(num_requests=2, max_tokens=10)
    for req in requests:
        req.num_computed_tokens = req.num_tokens
        scheduler.requests[req.request_id] = req
        scheduler.running.append(req)
        scheduler.scheduled_req_ids.add(req.request_id)

    scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
                                       scheduled_cached_reqs=[],
                                       num_scheduled_tokens={
                                           requests[0].request_id: 1,
                                           requests[1].request_id: 2
                                       },
                                       total_num_scheduled_tokens=3,
                                       scheduled_encoder_inputs={},
                                       scheduled_spec_decode_tokens={
                                           requests[0].request_id: [],
                                           requests[1].request_id: [10]
                                       },
                                       num_common_prefix_blocks=0,
                                       finished_req_ids=set(),
                                       free_encoder_input_ids=[],
                                       structured_output_request_ids={},
                                       grammar_bitmask=None)
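
    # Request 0 is scheduled for 1 token (its new sample only); request 1 is
    # scheduled for 2 tokens because one speculative token ([10]) is being
    # validated alongside its new sample.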

    model_output = ModelRunnerOutput(
        req_ids=[req.request_id for req in requests],
        req_id_to_index={
            req.request_id: i
            for i, req in enumerate(requests)
        },
        # First request hits EOS, second continues
        sampled_token_ids=[[EOS_TOKEN_ID], [10, 11]],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={})

    scheduler.update_from_output(scheduler_output, model_output)

    # Verify first request stopped, second continues
    assert len(scheduler.running) == 1
    assert scheduler.running[0].request_id == requests[1].request_id
    assert requests[0].status == RequestStatus.FINISHED_STOPPED
    assert requests[0].request_id in scheduler.finished_req_ids
    assert list(requests[0].output_token_ids) == [EOS_TOKEN_ID]
    assert list(requests[1].output_token_ids) == [10, 11]

    # Test case 2: Stop on custom stop token
    scheduler = create_scheduler()
    requests = create_requests(num_requests=2,
                               max_tokens=10,
                               stop_token_ids=[42, 43])
    for req in requests:
        req.num_computed_tokens = req.num_tokens
        scheduler.requests[req.request_id] = req
        scheduler.running.append(req)
        scheduler.scheduled_req_ids.add(req.request_id)

    scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
                                       scheduled_cached_reqs=[],
                                       num_scheduled_tokens={
                                           requests[0].request_id: 3,
                                           requests[1].request_id: 2
                                       },
                                       total_num_scheduled_tokens=5,
                                       scheduled_encoder_inputs={},
                                       scheduled_spec_decode_tokens={
                                           requests[0].request_id: [10, 42],
                                           requests[1].request_id: [13]
                                       },
                                       num_common_prefix_blocks=0,
                                       finished_req_ids=set(),
                                       free_encoder_input_ids=[],
                                       structured_output_request_ids={},
                                       grammar_bitmask=None)
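
    # With speculative tokens [10, 42] scheduled for request 0, sampling
    # [10, 42, 12] stops the request at the stop token 42; the trailing 12 is
    # discarded, as the assertions below check.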

    model_output = ModelRunnerOutput(
        req_ids=[req.request_id for req in requests],
        req_id_to_index={
            req.request_id: i
            for i, req in enumerate(requests)
        },
        # First request hits stop token
        sampled_token_ids=[[10, 42, 12], [13, 14]],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={})

    scheduler.update_from_output(scheduler_output, model_output)

    # Verify first request stopped on custom token
    assert len(scheduler.running) == 1
    assert scheduler.running[0].request_id == requests[1].request_id
    assert requests[0].status == RequestStatus.FINISHED_STOPPED
    assert requests[0].stop_reason == 42
    assert requests[0].request_id in scheduler.finished_req_ids
    assert list(requests[0].output_token_ids) == [10, 42]
    assert list(requests[1].output_token_ids) == [13, 14]

    # Test case 3: Stop on max tokens
    scheduler = create_scheduler()
    requests = create_requests(num_requests=2, max_tokens=2)
    for req in requests:
        req.num_computed_tokens = req.num_tokens
        scheduler.requests[req.request_id] = req
        scheduler.running.append(req)
        scheduler.scheduled_req_ids.add(req.request_id)

    scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
                                       scheduled_cached_reqs=[],
                                       num_scheduled_tokens={
                                           requests[0].request_id: 3,
                                           requests[1].request_id: 1
                                       },
                                       total_num_scheduled_tokens=4,
                                       scheduled_encoder_inputs={},
                                       scheduled_spec_decode_tokens={
                                           requests[0].request_id: [10, 11],
                                           requests[1].request_id: []
                                       },
                                       num_common_prefix_blocks=0,
                                       finished_req_ids=set(),
                                       free_encoder_input_ids=[],
                                       structured_output_request_ids={},
                                       grammar_bitmask=None)

    model_output = ModelRunnerOutput(
        req_ids=[req.request_id for req in requests],
        req_id_to_index={
            req.request_id: i
            for i, req in enumerate(requests)
        },
        # First request exceeds max_tokens
        sampled_token_ids=[[10, 11, 12], [13]],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={})

    scheduler.update_from_output(scheduler_output, model_output)

    # Verify first request stopped due to length
    assert len(scheduler.running) == 1
    assert scheduler.running[0].request_id == requests[1].request_id
    assert requests[0].status == RequestStatus.FINISHED_LENGTH_CAPPED
    assert requests[0].request_id in scheduler.finished_req_ids
    # Truncated to max_tokens
    assert list(requests[0].output_token_ids) == [10, 11]
    assert list(requests[1].output_token_ids) == [13]

    # Test case 4: Ignore EOS flag
    scheduler = create_scheduler()
    requests = create_requests(num_requests=1, max_tokens=10)
    requests[0].sampling_params.ignore_eos = True
    requests[0].num_computed_tokens = requests[0].num_tokens
    scheduler.requests[requests[0].request_id] = requests[0]
    scheduler.running.append(requests[0])
    scheduler.scheduled_req_ids.add(requests[0].request_id)

    scheduler_output = SchedulerOutput(
        scheduled_new_reqs=[],
        scheduled_cached_reqs=[],
        num_scheduled_tokens={requests[0].request_id: 3},
        total_num_scheduled_tokens=3,
        scheduled_encoder_inputs={},
        scheduled_spec_decode_tokens={
            requests[0].request_id: [EOS_TOKEN_ID, 10]
        },
        num_common_prefix_blocks=0,
        finished_req_ids=set(),
        free_encoder_input_ids=[],
        structured_output_request_ids={},
        grammar_bitmask=None)

    model_output = ModelRunnerOutput(
        req_ids=[requests[0].request_id],
        req_id_to_index={requests[0].request_id: 0},
        sampled_token_ids=[[EOS_TOKEN_ID, 10, 11]],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={})

    scheduler.update_from_output(scheduler_output, model_output)

    # Verify request continues past EOS
    assert len(scheduler.running) == 1
    assert not requests[0].is_finished()
    assert list(requests[0].output_token_ids) == [EOS_TOKEN_ID, 10, 11]


@pytest.mark.parametrize("enable_prefix_caching, prompt_logprobs", [
    (None, None),
    (True, 5),
])
def test_schedule_concurrent_batches(enable_prefix_caching: Optional[bool],
                                     prompt_logprobs: Optional[int]):
    scheduler = create_scheduler(
        max_num_batched_tokens=1024,
        max_num_seqs=2,
        enable_prefix_caching=enable_prefix_caching,
    )
    requests = create_requests(
        num_requests=2,
        num_tokens=512,
        prompt_logprobs=prompt_logprobs,
    )

    # Schedule the first request.
    scheduler.add_request(requests[0])
    scheduler_output0 = scheduler.schedule()
    assert len(scheduler_output0.scheduled_new_reqs) == 1
    assert scheduler_output0.num_scheduled_tokens[
        requests[0].request_id] == 512

    # The first request is still running, so only schedule the second request.
    scheduler.add_request(requests[1])
    scheduler_output1 = scheduler.schedule()
    assert len(scheduler_output1.scheduled_new_reqs) == 1
    assert scheduler_output1.num_scheduled_tokens[
        requests[1].request_id] == 512
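
    # At this point two scheduler outputs are in flight at once, mimicking
    # pipelined execution: each batch is later reconciled with its own
    # ModelRunnerOutput via update_from_output().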

    # Model output of the first request.
    model_runner_output = ModelRunnerOutput(
        req_ids=[requests[0].request_id],
        req_id_to_index={requests[0].request_id: 0},
        sampled_token_ids=[[0]],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={},
    )
    scheduler.update_from_output(scheduler_output0, model_runner_output)

    # Schedule the next step.
    # The first request can be scheduled again while the second
    # request is still running.
    scheduler_output2 = scheduler.schedule()
    assert scheduler_output2.num_scheduled_tokens[requests[0].request_id] == 1

    # Model output of the second request.
    model_runner_output = ModelRunnerOutput(
        req_ids=[requests[1].request_id],
        req_id_to_index={requests[1].request_id: 0},
        sampled_token_ids=[[0]],
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={},
    )
    scheduler.update_from_output(scheduler_output1, model_runner_output)


# Note - these test cases mirror some of those in test_rejection_sampler.py
@pytest.mark.parametrize(
    "spec_tokens,output_tokens,expected",
    [
        ([[1, 2, 3]], [[1, 2, 3, 4]], (3, 3)),  # perfect match
        ([[1, 2, 3]], [[1, 5]], (3, 1)),  # early mismatch
        ([[1, 2], [3]], [[1, 2, 5], [3, 4]], (3, 3)),  # multiple sequences
        ([[1]], [[1, 2]], (1, 1)),  # single token sequence
        ([[]], [[5]], (0, 0)),  # empty sequence
        ([[1, 2, 3], [4, 5, 6]], [[1, 2, 7], [4, 8]],
         (6, 3)),  # multiple mismatches
    ])
def test_schedule_spec_decoding_stats(spec_tokens, output_tokens, expected):
    """Test scheduling behavior with speculative decoding.

    This test verifies that:
    1. Speculated tokens get scheduled correctly
    2. Spec decoding stats properly count the number of draft and accepted
       tokens
    """
    scheduler = create_scheduler()
    requests = create_requests(num_requests=len(spec_tokens), num_tokens=1)
    req_ids = []
    req_to_index = {}
    for i, request in enumerate(requests):
        scheduler.add_request(request)
        req_ids.append(request.request_id)
        req_to_index[request.request_id] = i

    # Schedule a decode, which will also draft speculative tokens
    output = scheduler.schedule()
    assert len(output.scheduled_new_reqs) == len(requests)
    assert output.total_num_scheduled_tokens == len(requests)
    for i in range(len(requests)):
        req_id = requests[i].request_id
        assert output.num_scheduled_tokens[req_id] == 1
        assert req_id not in output.scheduled_spec_decode_tokens

    model_runner_output = ModelRunnerOutput(
        req_ids=req_ids,
        req_id_to_index=req_to_index,
        sampled_token_ids=[[0] for _ in range(len(requests))],
        spec_token_ids=spec_tokens,
        logprobs=None,
        prompt_logprobs_dict={},
    )
    engine_core_outputs = scheduler.update_from_output(output,
                                                       model_runner_output)

    for i in range(len(requests)):
        running_req = scheduler.running[i]
        # The prompt token
        assert running_req.num_computed_tokens == 1
        # The prompt token and the sampled token
        assert running_req.num_tokens == 2
        # The prompt token, the sampled token, and the speculated tokens
        assert running_req.num_tokens_with_spec == 2 + len(spec_tokens[i])

    # No draft or accepted tokens counted yet
    assert engine_core_outputs.scheduler_stats.spec_decoding_stats is not None
    stats = engine_core_outputs.scheduler_stats.spec_decoding_stats
    assert stats.num_draft_tokens == 0
    assert stats.num_accepted_tokens == 0
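
    # Acceptance is only counted on the next step, when the speculated tokens
    # are re-scheduled and validated against the sampled tokens. For the
    # "early mismatch" case ([1, 2, 3] drafted, [1, 5] sampled), 3 draft
    # tokens are counted and only the first is accepted, giving (3, 1).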

    # Schedule the speculated tokens for validation
    output = scheduler.schedule()
    assert len(output.scheduled_new_reqs) == 0
    # The sampled token and speculated tokens
    assert output.total_num_scheduled_tokens == \
        len(requests) + sum(len(ids) for ids in spec_tokens)
    for i in range(len(requests)):
        req_id = requests[i].request_id
        assert output.num_scheduled_tokens[req_id] == 1 + len(spec_tokens[i])
        if spec_tokens[i]:
            assert len(output.scheduled_spec_decode_tokens[req_id]) == \
                len(spec_tokens[i])
        else:
            assert req_id not in output.scheduled_spec_decode_tokens

    model_runner_output = ModelRunnerOutput(
        req_ids=req_ids,
        req_id_to_index=req_to_index,
        sampled_token_ids=output_tokens,
        spec_token_ids=None,
        logprobs=None,
        prompt_logprobs_dict={},
    )
    engine_core_outputs = scheduler.update_from_output(output,
                                                       model_runner_output)

    assert engine_core_outputs.scheduler_stats.spec_decoding_stats is not None
    stats = engine_core_outputs.scheduler_stats.spec_decoding_stats
    assert stats.num_draft_tokens == expected[0]
    assert stats.num_accepted_tokens == expected[1]