2023-03-30 14:51:46 -07:00
|
|
|
import enum
|
|
|
|
import time
|
2023-09-03 21:43:43 -07:00
|
|
|
from typing import Dict, Iterable, List, Optional, Tuple, Union
|
2023-02-13 02:39:53 +00:00
|
|
|
|
2023-06-17 03:07:40 -07:00
|
|
|
from vllm.config import CacheConfig, SchedulerConfig
|
|
|
|
from vllm.core.block_manager import BlockSpaceManager
|
|
|
|
from vllm.core.policy import PolicyFactory
|
|
|
|
from vllm.logger import init_logger
|
|
|
|
from vllm.sequence import (Sequence, SequenceData, SequenceGroup,
|
2023-09-04 17:29:42 -07:00
|
|
|
SequenceGroupMetadata, SequenceStatus)
|
2023-02-13 02:39:53 +00:00
|
|
|
|
2023-05-10 01:06:53 -07:00
|
|
|
logger = init_logger(__name__)
|
2023-05-10 01:57:07 -07:00
|
|
|
|
2023-05-10 01:06:53 -07:00
|
|
|
|
2023-03-30 14:51:46 -07:00
|
|
|
class PreemptionMode(enum.Enum):
    """How a preempted sequence group is evicted from the GPU.

    1. Swapping: Swap out the blocks of the preempted sequences to CPU memory
    and swap them back in when the sequences are resumed.
    2. Recomputation: Discard the blocks of the preempted sequences and
    recompute them when the sequences are resumed, treating the sequences as
    new prompts.
    """
    SWAP = enum.auto()
    RECOMPUTE = enum.auto()
|
|
|
|
|
|
|
|
|
2023-05-20 13:06:59 -07:00
|
|
|
class SchedulerOutputs:
    """Result of one scheduling step.

    Carries the sequence groups chosen to run next, plus the cache-block
    swap/copy operations that must be issued before model execution.
    """

    def __init__(
        self,
        scheduled_seq_groups: List[SequenceGroup],
        prompt_run: bool,
        num_batched_tokens: int,
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
        ignored_seq_groups: List[SequenceGroup],
    ) -> None:
        # Swap in and swap out should never happen at the same time.
        assert not (blocks_to_swap_in and blocks_to_swap_out)
        self.scheduled_seq_groups = scheduled_seq_groups
        self.prompt_run = prompt_run
        self.num_batched_tokens = num_batched_tokens
        self.blocks_to_swap_in = blocks_to_swap_in
        self.blocks_to_swap_out = blocks_to_swap_out
        self.blocks_to_copy = blocks_to_copy
        self.ignored_seq_groups = ignored_seq_groups

    def is_empty(self) -> bool:
        """Return True if this step schedules nothing and moves no blocks."""
        # NOTE: We do not consider the ignored sequence groups.
        has_work = (self.scheduled_seq_groups or self.blocks_to_swap_in
                    or self.blocks_to_swap_out or self.blocks_to_copy)
        return not has_work
|
2023-05-20 13:06:59 -07:00
|
|
|
|
|
|
|
|
2023-02-13 02:39:53 +00:00
|
|
|
class Scheduler:
    """Decides, each engine step, which sequence groups run on the GPU.

    Maintains three FCFS queues (waiting / running / swapped) and drives the
    block space manager to allocate, append, swap, and free KV-cache blocks.
    """

    def __init__(
        self,
        scheduler_config: SchedulerConfig,
        cache_config: CacheConfig,
    ) -> None:
        """Build the policy, block manager, and the three state queues."""
        self.scheduler_config = scheduler_config
        self.cache_config = cache_config

        # A prompt longer than this can never be scheduled; it is capped by
        # both the model context length and the per-batch token budget.
        self.prompt_limit = min(self.scheduler_config.max_model_len,
                                self.scheduler_config.max_num_batched_tokens)

        # Instantiate the scheduling policy.
        self.policy = PolicyFactory.get_policy(policy_name="fcfs")
        # Create the block space manager.
        self.block_manager = BlockSpaceManager(
            block_size=self.cache_config.block_size,
            num_gpu_blocks=self.cache_config.num_gpu_blocks,
            num_cpu_blocks=self.cache_config.num_cpu_blocks,
            sliding_window=self.cache_config.sliding_window)

        # TODO(zhuohan): Use deque instead of list for better performance.
        # Sequence groups in the WAITING state.
        self.waiting: List[SequenceGroup] = []
        # Sequence groups in the RUNNING state.
        self.running: List[SequenceGroup] = []
        # Sequence groups in the SWAPPED state.
        self.swapped: List[SequenceGroup] = []

    def add_seq_group(self, seq_group: SequenceGroup) -> None:
        """Enqueue a new sequence group onto the waiting queue."""
        self.waiting.append(seq_group)

    def abort_seq_group(self, request_id: Union[str, Iterable[str]]) -> None:
        """Abort the sequence group(s) with the given request id(s).

        Unfinished sequences are marked FINISHED_ABORTED and their blocks
        are freed; already-finished sequences are left untouched.
        """
        if isinstance(request_id, str):
            request_id = (request_id, )
        request_ids = set(request_id)
        for state_queue in [self.waiting, self.running, self.swapped]:
            # We need to reverse the list as we are removing elements
            # from it as we iterate over it. If we don't do it,
            # indices will get messed up and we will skip over elements.
            for seq_group in reversed(state_queue):
                if seq_group.request_id in request_ids:
                    # Remove the sequence group from the state queue.
                    state_queue.remove(seq_group)
                    for seq in seq_group.get_seqs():
                        if seq.is_finished():
                            continue
                        seq.status = SequenceStatus.FINISHED_ABORTED
                        self.free_seq(seq)
                    request_ids.remove(seq_group.request_id)
                    if not request_ids:
                        return

    def has_unfinished_seqs(self) -> bool:
        """Return True if any queue still holds a sequence group."""
        # bool() so the return value matches the annotation instead of
        # leaking one of the queue lists through the `or` chain.
        return bool(self.waiting or self.running or self.swapped)

    def get_num_unfinished_seq_groups(self) -> int:
        """Return the total number of sequence groups across all queues."""
        return len(self.waiting) + len(self.running) + len(self.swapped)

    def _schedule(self) -> SchedulerOutputs:
        """Run one scheduling step and mutate the internal queues.

        Prefers a prompt (prefill) batch from the waiting queue when nothing
        is swapped out; otherwise schedules a generation batch, preempting
        and swapping as the block manager's capacity dictates.
        """
        # Blocks that need to be swapped or copied before model execution.
        blocks_to_swap_in: Dict[int, int] = {}
        blocks_to_swap_out: Dict[int, int] = {}
        blocks_to_copy: Dict[int, List[int]] = {}

        # Fix the current time.
        now = time.monotonic()

        # Join waiting sequences if possible.
        if not self.swapped:
            ignored_seq_groups: List[SequenceGroup] = []
            scheduled: List[SequenceGroup] = []
            # The total number of sequences on the fly, including the
            # requests in the generation phase.
            num_curr_seqs = sum(seq_group.get_max_num_running_seqs()
                                for seq_group in self.running)
            seq_lens: List[int] = []

            # Optimization: We do not sort the waiting queue since the preempted
            # sequence groups are added to the front and the new sequence groups
            # are added to the back.
            while self.waiting:
                seq_group = self.waiting[0]

                assert seq_group.num_seqs() == 1, (
                    "Waiting sequence group should have only one prompt "
                    "sequence.")
                num_prompt_tokens = seq_group.get_seqs()[0].get_len()
                if num_prompt_tokens > self.prompt_limit:
                    logger.warning(
                        f"Input prompt ({num_prompt_tokens} tokens) is too long"
                        f" and exceeds limit of {self.prompt_limit}")
                    for seq in seq_group.get_seqs():
                        seq.status = SequenceStatus.FINISHED_IGNORED
                    ignored_seq_groups.append(seq_group)
                    self.waiting.pop(0)
                    continue

                # If the sequence group cannot be allocated, stop.
                if not self.block_manager.can_allocate(seq_group):
                    break

                # If the number of batched tokens exceeds the limit, stop.
                # Prompts are padded to the longest one in the batch.
                new_seq_lens = seq_lens + [num_prompt_tokens]
                num_batched_tokens = len(new_seq_lens) * max(new_seq_lens)
                if (num_batched_tokens >
                        self.scheduler_config.max_num_batched_tokens):
                    break

                # The total number of sequences in the RUNNING state should not
                # exceed the maximum number of sequences.
                num_new_seqs = seq_group.get_max_num_running_seqs()
                if (num_curr_seqs + num_new_seqs >
                        self.scheduler_config.max_num_seqs):
                    break

                num_paddings = num_batched_tokens - sum(new_seq_lens)
                if num_paddings > self.scheduler_config.max_paddings:
                    break
                seq_lens = new_seq_lens

                seq_group = self.waiting.pop(0)
                self._allocate(seq_group)
                self.running.append(seq_group)
                num_curr_seqs += num_new_seqs
                scheduled.append(seq_group)

            if scheduled or ignored_seq_groups:
                scheduler_outputs = SchedulerOutputs(
                    scheduled_seq_groups=scheduled,
                    prompt_run=True,
                    # Guard against max() on an empty list: when every waiting
                    # group was ignored (prompt too long), nothing is
                    # scheduled and the batch carries zero tokens.
                    num_batched_tokens=(len(seq_lens) * max(seq_lens)
                                        if seq_lens else 0),
                    blocks_to_swap_in=blocks_to_swap_in,
                    blocks_to_swap_out=blocks_to_swap_out,
                    blocks_to_copy=blocks_to_copy,
                    ignored_seq_groups=ignored_seq_groups,
                )
                return scheduler_outputs

        # NOTE(woosuk): Preemption happens only when there is no available slot
        # to keep all the sequence groups in the RUNNING state.
        # In this case, the policy is responsible for deciding which sequence
        # groups to preempt.
        self.running = self.policy.sort_by_priority(now, self.running)

        # Reserve new token slots for the running sequence groups.
        running: List[SequenceGroup] = []
        preempted: List[SequenceGroup] = []
        while self.running:
            seq_group = self.running.pop(0)
            while not self.block_manager.can_append_slot(seq_group):
                if self.running:
                    # Preempt the lowest-priority sequence groups.
                    victim_seq_group = self.running.pop(-1)
                    self._preempt(victim_seq_group, blocks_to_swap_out)
                    preempted.append(victim_seq_group)
                else:
                    # No other sequence groups can be preempted.
                    # Preempt the current sequence group.
                    self._preempt(seq_group, blocks_to_swap_out)
                    preempted.append(seq_group)
                    break
            else:
                # Append new slots to the sequence group.
                self._append_slot(seq_group, blocks_to_copy)
                running.append(seq_group)
        self.running = running

        # Swap in the sequence groups in the SWAPPED state if possible.
        self.swapped = self.policy.sort_by_priority(now, self.swapped)
        if not preempted:
            num_curr_seqs = sum(seq_group.get_max_num_running_seqs()
                                for seq_group in self.running)

            while self.swapped:
                seq_group = self.swapped[0]
                # If the sequence group cannot be swapped in, stop.
                if not self.block_manager.can_swap_in(seq_group):
                    break

                # The total number of sequences in the RUNNING state should not
                # exceed the maximum number of sequences.
                num_new_seqs = seq_group.get_max_num_running_seqs()
                if (num_curr_seqs + num_new_seqs >
                        self.scheduler_config.max_num_seqs):
                    break

                seq_group = self.swapped.pop(0)
                self._swap_in(seq_group, blocks_to_swap_in)
                self._append_slot(seq_group, blocks_to_copy)
                num_curr_seqs += num_new_seqs
                self.running.append(seq_group)

        # Each sequence in the generation phase only takes one token slot.
        # Therefore, the number of batched tokens is equal to the number of
        # sequences in the RUNNING state.
        num_batched_tokens = sum(
            seq_group.num_seqs(status=SequenceStatus.RUNNING)
            for seq_group in self.running)

        scheduler_outputs = SchedulerOutputs(
            scheduled_seq_groups=self.running,
            prompt_run=False,
            num_batched_tokens=num_batched_tokens,
            blocks_to_swap_in=blocks_to_swap_in,
            blocks_to_swap_out=blocks_to_swap_out,
            blocks_to_copy=blocks_to_copy,
            ignored_seq_groups=[],
        )
        return scheduler_outputs

    def schedule(self) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs]:
        """Schedule one step and package per-group metadata for the workers.

        Returns the metadata list (sequence data + block tables for every
        RUNNING sequence of each scheduled group) and the raw outputs.
        """
        # Schedule sequence groups.
        # This function call changes the internal states of the scheduler
        # such as self.running, self.swapped, and self.waiting.
        scheduler_outputs = self._schedule()

        # Create input data structures.
        seq_group_metadata_list: List[SequenceGroupMetadata] = []
        for seq_group in scheduler_outputs.scheduled_seq_groups:
            seq_data: Dict[int, SequenceData] = {}
            block_tables: Dict[int, List[int]] = {}
            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                seq_id = seq.seq_id
                seq_data[seq_id] = seq.data
                block_tables[seq_id] = self.block_manager.get_block_table(seq)

            seq_group_metadata = SequenceGroupMetadata(
                request_id=seq_group.request_id,
                is_prompt=scheduler_outputs.prompt_run,
                seq_data=seq_data,
                sampling_params=seq_group.sampling_params,
                block_tables=block_tables,
            )
            seq_group_metadata_list.append(seq_group_metadata)
        return seq_group_metadata_list, scheduler_outputs

    def fork_seq(self, parent_seq: Sequence, child_seq: Sequence) -> None:
        """Share the parent's block table with the child (e.g. beam search)."""
        self.block_manager.fork(parent_seq, child_seq)

    def free_seq(self, seq: Sequence) -> None:
        """Release the KV-cache blocks held by a single sequence."""
        self.block_manager.free(seq)

    def free_finished_seq_groups(self) -> None:
        """Drop finished sequence groups from the running queue."""
        self.running = [
            seq_group for seq_group in self.running
            if not seq_group.is_finished()
        ]

    def _allocate(self, seq_group: SequenceGroup) -> None:
        """Allocate GPU blocks for a group and mark its sequences RUNNING."""
        self.block_manager.allocate(seq_group)
        for seq in seq_group.get_seqs():
            seq.status = SequenceStatus.RUNNING

    def _append_slot(
        self,
        seq_group: SequenceGroup,
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
        """Reserve one new token slot per RUNNING sequence.

        When the block manager performs copy-on-write, the (src, dst) block
        pair is recorded in `blocks_to_copy` for the cache engine.
        """
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            ret = self.block_manager.append_slot(seq)
            if ret is not None:
                src_block, dst_block = ret
                if src_block in blocks_to_copy:
                    blocks_to_copy[src_block].append(dst_block)
                else:
                    blocks_to_copy[src_block] = [dst_block]

    def _preempt(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
        preemption_mode: Optional[PreemptionMode] = None,
    ) -> None:
        """Evict a running group, by recomputation or by swapping to CPU."""
        # If preemption mode is not specified, we determine the mode as follows:
        # We use recomputation by default since it incurs lower overhead than
        # swapping. However, when the sequence group has multiple sequences
        # (e.g., beam search), recomputation is not currently supported. In
        # such a case, we use swapping instead.
        # FIXME(woosuk): This makes our scheduling policy a bit bizarre.
        # As swapped sequences are prioritized over waiting sequences,
        # sequence groups with multiple sequences are implicitly prioritized
        # over sequence groups with a single sequence.
        # TODO(woosuk): Support recomputation for sequence groups with multiple
        # sequences. This may require a more sophisticated CUDA kernel.
        if preemption_mode is None:
            if seq_group.get_max_num_running_seqs() == 1:
                preemption_mode = PreemptionMode.RECOMPUTE
            else:
                preemption_mode = PreemptionMode.SWAP
        if preemption_mode == PreemptionMode.RECOMPUTE:
            self._preempt_by_recompute(seq_group)
        elif preemption_mode == PreemptionMode.SWAP:
            self._preempt_by_swap(seq_group, blocks_to_swap_out)
        else:
            raise AssertionError("Invalid preemption mode.")

    def _preempt_by_recompute(
        self,
        seq_group: SequenceGroup,
    ) -> None:
        """Free the group's blocks and requeue it as a fresh prompt."""
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        assert len(seqs) == 1
        for seq in seqs:
            seq.status = SequenceStatus.WAITING
            self.block_manager.free(seq)
        # NOTE: For FCFS, we insert the preempted sequence group to the front
        # of the waiting queue.
        self.waiting.insert(0, seq_group)

    def _preempt_by_swap(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
    ) -> None:
        """Swap the group's blocks out to CPU and move it to the swapped queue."""
        self._swap_out(seq_group, blocks_to_swap_out)
        self.swapped.append(seq_group)

    def _swap_in(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_in: Dict[int, int],
    ) -> None:
        """Bring the group's blocks back to GPU and mark sequences RUNNING."""
        mapping = self.block_manager.swap_in(seq_group)
        blocks_to_swap_in.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            seq.status = SequenceStatus.RUNNING

    def _swap_out(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
    ) -> None:
        """Move the group's blocks to CPU and mark sequences SWAPPED.

        Raises RuntimeError when the CPU swap space cannot hold the blocks.
        """
        if not self.block_manager.can_swap_out(seq_group):
            # FIXME(woosuk): Abort the sequence group instead of aborting the
            # entire engine.
            raise RuntimeError(
                "Aborted due to the lack of CPU swap space. Please increase "
                "the swap space to avoid this error.")
        mapping = self.block_manager.swap_out(seq_group)
        blocks_to_swap_out.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            seq.status = SequenceStatus.SWAPPED
|