import enum
import time
from typing import Dict, List, Optional, Tuple

from cacheflow.config import CacheConfig, SchedulerConfig
from cacheflow.core.block_manager import BlockSpaceManager
from cacheflow.core.policy import PolicyFactory
from cacheflow.logger import init_logger
from cacheflow.sequence import (Sequence, SequenceData, SequenceGroup,
                                SequenceGroupMetadata, SequenceOutputs,
                                SequenceStatus)

logger = init_logger(__name__)

_LOGGING_INTERVAL_SEC = 10


class PreemptionMode(enum.Enum):
    """Preemption modes.

    1. Swapping: Swap out the blocks of the preempted sequences to CPU memory
    and swap them back in when the sequences are resumed.
    2. Recomputation: Discard the blocks of the preempted sequences and
    recompute them when the sequences are resumed, treating the sequences as
    new prompts.
    """
    SWAP = enum.auto()
    RECOMPUTE = enum.auto()


class SchedulerOutputs:

    def __init__(
        self,
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
        self.blocks_to_swap_in = blocks_to_swap_in
        self.blocks_to_swap_out = blocks_to_swap_out
        self.blocks_to_copy = blocks_to_copy
        # Swap in and swap out should never happen at the same time.
        assert not (blocks_to_swap_in and blocks_to_swap_out)

    def is_empty(self) -> bool:
        return (not self.blocks_to_swap_in and
                not self.blocks_to_swap_out and
                not self.blocks_to_copy)


class Scheduler:

    def __init__(
        self,
        scheduler_config: SchedulerConfig,
        cache_config: CacheConfig,
        log_stats: bool,
    ) -> None:
        self.scheduler_config = scheduler_config
        self.cache_config = cache_config
        self.log_stats = log_stats

        # Instantiate the scheduling policy.
        self.policy = PolicyFactory.get_policy(policy_name='fcfs')
        # Create the block space manager.
        self.block_manager = BlockSpaceManager(
            block_size=self.cache_config.block_size,
            num_gpu_blocks=self.cache_config.num_gpu_blocks,
            num_cpu_blocks=self.cache_config.num_cpu_blocks,
        )

        # Sequence groups in the WAITING state.
        self.waiting: List[SequenceGroup] = []
        # Sequence groups in the RUNNING state.
        self.running: List[SequenceGroup] = []
        # Sequence groups in the SWAPPED state.
        self.swapped: List[SequenceGroup] = []

        self.last_logging_time: float = 0.0
        # List of (timestamp, num_tokens) tuples.
        self.num_input_tokens: List[Tuple[float, int]] = []

    def add_seq_group(self, seq_group: SequenceGroup) -> None:
        # Add sequence groups to the waiting queue.
        self.waiting.append(seq_group)

    def has_unfinished_seqs(self) -> bool:
        return bool(self.waiting or self.running or self.swapped)

    def get_num_unfinished_seq_groups(self) -> int:
        return len(self.waiting) + len(self.running) + len(self.swapped)

    def _schedule(self) -> Tuple[SchedulerOutputs, List[str]]:
        # Blocks that need to be swapped or copied before model execution.
        blocks_to_swap_in: Dict[int, int] = {}
        blocks_to_swap_out: Dict[int, int] = {}
        blocks_to_copy: Dict[int, List[int]] = {}

        # Fix the current time.
        now = time.time()

        # NOTE(woosuk): We prioritize the sequence groups in the RUNNING state
        # in order to minimize the preemption overheads.
        # Preemption happens only when there is no available slot to keep all
        # the sequence groups in the RUNNING state.
        # In this case, the policy is responsible for deciding which sequence
        # groups to preempt.
        self.running = self.policy.sort_by_priority(now, self.running)

        # Reserve new token slots for the running sequence groups.
        running: List[SequenceGroup] = []
        preempted: List[SequenceGroup] = []
        while self.running:
            seq_group = self.running.pop(0)
            while not self.block_manager.can_append_slot(seq_group):
                if self.running:
                    # Preempt the lowest-priority sequence groups.
                    victim_seq_group = self.running.pop(-1)
                    self._preempt(victim_seq_group, blocks_to_swap_out)
                    preempted.append(victim_seq_group)
                else:
                    # No other sequence groups can be preempted.
                    # Preempt the current sequence group.
                    self._preempt(seq_group, blocks_to_swap_out)
                    preempted.append(seq_group)
                    break
            else:
                # Append new slots to the sequence group.
                self._append_slot(seq_group, blocks_to_copy)
                running.append(seq_group)
        self.running = running

        # Swap in the sequence groups in the SWAPPED state if possible.
        self.swapped = self.policy.sort_by_priority(now, self.swapped)
        while self.swapped and not blocks_to_swap_out:
            seq_group = self.swapped[0]
            # If the sequence group has been preempted in this step, stop.
            if seq_group in preempted:
                break
            # If the sequence group cannot be swapped in, stop.
            if not self.block_manager.can_swap_in(seq_group):
                break

            # The total number of sequences in the RUNNING state should not
            # exceed the maximum number of sequences.
            num_new_seqs = seq_group.num_seqs(status=SequenceStatus.SWAPPED)
            num_curr_seqs = len(self.running)
            if (num_curr_seqs + num_new_seqs >
                    self.scheduler_config.max_num_seqs):
                break

            seq_group = self.swapped.pop(0)
            self._swap_in(seq_group, blocks_to_swap_in)
            self._append_slot(seq_group, blocks_to_copy)
            self.running.append(seq_group)

        num_batched_tokens = sum(
            seq_group.num_seqs(status=SequenceStatus.RUNNING)
            for seq_group in self.running
        )

        # Join waiting sequences if possible.
        prompt_group_ids: List[str] = []
        # NOTE(woosuk): The sequence groups in the SWAPPED state are strictly
        # prioritized over the sequence groups in the WAITING state.
        # This is because we want to bound the amount of CPU memory taken by
        # the swapped sequence groups.
        if not self.swapped:
            # Optimization: We do not sort the waiting queue since the
            # preempted sequence groups are added to the front and the new
            # sequence groups are added to the back.
            while self.waiting:
                seq_group = self.waiting[0]
                # If the sequence group has been preempted in this step, stop.
                if seq_group in preempted:
                    break
                # If the sequence group cannot be allocated, stop.
                if not self.block_manager.can_allocate(seq_group):
                    break

                # If the number of batched tokens exceeds the limit, stop.
                num_prompt_tokens = seq_group.get_seqs()[0].get_len()
                if (num_batched_tokens + num_prompt_tokens >
                        self.scheduler_config.max_num_batched_tokens):
                    break

                # The total number of sequences in the RUNNING state should
                # not exceed the maximum number of sequences.
                num_new_seqs = seq_group.num_seqs(
                    status=SequenceStatus.WAITING)
                num_curr_seqs = len(self.running)
                if (num_curr_seqs + num_new_seqs >
                        self.scheduler_config.max_num_seqs):
                    break

                seq_group = self.waiting.pop(0)
                self._allocate(seq_group)
                self.running.append(seq_group)
                num_batched_tokens += num_prompt_tokens
                prompt_group_ids.append(seq_group.request_id)

        scheduler_outputs = SchedulerOutputs(
            blocks_to_swap_in=blocks_to_swap_in,
            blocks_to_swap_out=blocks_to_swap_out,
            blocks_to_copy=blocks_to_copy,
        )
        if not self.log_stats:
            return scheduler_outputs, prompt_group_ids

        # TODO(woosuk): Move the below code to server.
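        # Log scheduling stats at most once every _LOGGING_INTERVAL_SEC
        # seconds. Throughput is estimated from the (timestamp, num_tokens)
        # entries retained within the interval: the tokens of all but the
        # newest entry, divided by the time elapsed since the oldest entry.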
        now = time.time()
        if num_batched_tokens > 0:
            self.num_input_tokens.append((now, num_batched_tokens))
        elapsed_time = now - self.last_logging_time
        if elapsed_time > _LOGGING_INTERVAL_SEC:
            self.last_logging_time = now
            self.num_input_tokens = [
                (t, n) for t, n in self.num_input_tokens
                if now - t < _LOGGING_INTERVAL_SEC
            ]
            if len(self.num_input_tokens) > 1:
                total_num_tokens = sum(
                    n for _, n in self.num_input_tokens[:-1])
                window = now - self.num_input_tokens[0][0]
                avg_throughput = total_num_tokens / window
            else:
                avg_throughput = 0.0

            total_num_gpu_blocks = self.cache_config.num_gpu_blocks
            num_free_gpu_blocks = self.block_manager.get_num_free_gpu_blocks()
            num_used_gpu_blocks = total_num_gpu_blocks - num_free_gpu_blocks
            gpu_cache_usage = num_used_gpu_blocks / total_num_gpu_blocks

            total_num_cpu_blocks = self.cache_config.num_cpu_blocks
            if total_num_cpu_blocks > 0:
                num_free_cpu_blocks = (
                    self.block_manager.get_num_free_cpu_blocks())
                num_used_cpu_blocks = (
                    total_num_cpu_blocks - num_free_cpu_blocks)
                cpu_cache_usage = num_used_cpu_blocks / total_num_cpu_blocks
            else:
                cpu_cache_usage = 0.0

            logger.info(
                f"Throughput: {avg_throughput:.1f} tokens/s, "
                f"Running: {len(self.running)} reqs, "
                f"Swapped: {len(self.swapped)} reqs, "
                f"Pending: {len(self.waiting)} reqs, "
                f"GPU KV cache usage: {gpu_cache_usage * 100:.1f}%, "
                f"CPU KV cache usage: {cpu_cache_usage * 100:.1f}%")

        return scheduler_outputs, prompt_group_ids

    def schedule(
        self,
    ) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs]:
        # Schedule sequence groups.
        # This function call changes the internal states of the scheduler
        # such as self.running, self.swapped, and self.waiting.
        scheduler_outputs, prompt_group_ids = self._schedule()

        # Create input data structures.
        seq_group_metadata_list: List[SequenceGroupMetadata] = []
        for seq_group in self.running:
            is_prompt = seq_group.request_id in prompt_group_ids

            seq_data: Dict[int, List[SequenceData]] = {}
            block_tables: Dict[int, List[int]] = {}
            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                seq_id = seq.seq_id
                seq_data[seq_id] = seq.data
                block_tables[seq_id] = self.block_manager.get_block_table(seq)

            seq_group_metadata = SequenceGroupMetadata(
                request_id=seq_group.request_id,
                is_prompt=is_prompt,
                seq_data=seq_data,
                sampling_params=seq_group.sampling_params,
                block_tables=block_tables,
            )
            seq_group_metadata_list.append(seq_group_metadata)
        return seq_group_metadata_list, scheduler_outputs

    def update(
        self,
        seq_outputs: Dict[int, SequenceOutputs],
    ) -> List[SequenceGroup]:
        # Update the running sequences and free blocks.
        for seq_group in self.running:
            # Process beam search results before processing the new tokens.
            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                output = seq_outputs[seq.seq_id]
                if seq.seq_id != output.parent_seq_id:
                    # The sequence is a fork of the parent sequence
                    # (beam search). Free the current sequence.
                    self.block_manager.free(seq)
                    # Fork the parent sequence.
                    parent_seq = seq_group.find(output.parent_seq_id)
                    parent_seq.fork(seq)
                    self.block_manager.fork(parent_seq, seq)

            # Process the new tokens.
            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                # Append a new token to the sequence.
                output = seq_outputs[seq.seq_id]
                seq.append_token_id(output.output_token, output.logprobs)
        # Return a shallow copy of the running queue to prevent the queue
        # from being modified by the caller.
        return self.running.copy()

    def free_seq(self, seq: Sequence, finish_status: SequenceStatus) -> None:
        seq.status = finish_status
        self.block_manager.free(seq)

    def free_finished_seq_groups(self) -> None:
        self.running = [
            seq_group for seq_group in self.running
            if not seq_group.is_finished()
        ]

    def _allocate(self, seq_group: SequenceGroup) -> None:
        self.block_manager.allocate(seq_group)
        for seq in seq_group.get_seqs():
            seq.status = SequenceStatus.RUNNING

    def _append_slot(
        self,
        seq_group: SequenceGroup,
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            ret = self.block_manager.append_slot(seq)
            if ret is not None:
                src_block, dst_block = ret
                if src_block in blocks_to_copy:
                    blocks_to_copy[src_block].append(dst_block)
                else:
                    blocks_to_copy[src_block] = [dst_block]

    def _preempt(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
        preemption_mode: Optional[PreemptionMode] = None,
    ) -> None:
        # If preemption mode is not specified, we determine the mode as
        # follows: We use recomputation by default since it incurs lower
        # overhead than swapping. However, when the sequence group has
        # multiple sequences (e.g., beam search), recomputation is not
        # supported. In such a case, we use swapping instead.
        # FIXME(woosuk): This makes our scheduling policy a bit bizarre.
        # As swapped sequences are prioritized over waiting sequences,
        # sequence groups with multiple sequences are implicitly prioritized
        # over sequence groups with a single sequence.
        # TODO(woosuk): Support recomputation for sequence groups with
        # multiple sequences. This may require a more sophisticated CUDA
        # kernel.
        if preemption_mode is None:
            seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
            if len(seqs) == 1:
                preemption_mode = PreemptionMode.RECOMPUTE
            else:
                preemption_mode = PreemptionMode.SWAP
        if preemption_mode == PreemptionMode.RECOMPUTE:
            self._preempt_by_recompute(seq_group)
        elif preemption_mode == PreemptionMode.SWAP:
            self._preempt_by_swap(seq_group, blocks_to_swap_out)
        else:
            assert False, 'Invalid preemption mode.'

    def _preempt_by_recompute(
        self,
        seq_group: SequenceGroup,
    ) -> None:
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        assert len(seqs) == 1
        for seq in seqs:
            seq.status = SequenceStatus.WAITING
            self.block_manager.free(seq)
        # NOTE: For FCFS, we insert the preempted sequence group to the front
        # of the waiting queue.
        self.waiting.insert(0, seq_group)

    def _preempt_by_swap(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
    ) -> None:
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        for seq in seqs:
            seq.status = SequenceStatus.SWAPPED
        self._swap_out(seq_group, blocks_to_swap_out)
        self.swapped.append(seq_group)

    def _swap_in(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_in: Dict[int, int],
    ) -> None:
        mapping = self.block_manager.swap_in(seq_group)
        blocks_to_swap_in.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            seq.status = SequenceStatus.RUNNING

    def _swap_out(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
    ) -> None:
        assert self.block_manager.can_swap_out(seq_group)
        mapping = self.block_manager.swap_out(seq_group)
        blocks_to_swap_out.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            seq.status = SequenceStatus.SWAPPED
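

# NOTE: Rough sketch of how this scheduler is typically driven, for
# illustration only. The surrounding engine/worker that builds `seq_group`
# and produces `seq_outputs` is an assumption, not part of this module;
# only the Scheduler methods named below are defined here.
#
#     scheduler.add_seq_group(seq_group)          # enqueue a new request
#     while scheduler.has_unfinished_seqs():
#         metadata_list, outputs = scheduler.schedule()
#         # ... perform the swaps/copies described by `outputs`, run the
#         # model on `metadata_list`, and collect a Dict[int,
#         # SequenceOutputs] keyed by seq_id ...
#         scheduler.update(seq_outputs)
#         scheduler.free_finished_seq_groups()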