From 7c1f7600248a0a0497a5c512ef0ee262577c5f7a Mon Sep 17 00:00:00 2001
From: yarongmu-google <150371854+yarongmu-google@users.noreply.github.com>
Date: Fri, 28 Mar 2025 21:13:15 -0700
Subject: [PATCH] [Kernel][TPU][ragged-paged-attn] vLLM code change for PR#8896
 (#15659)

Signed-off-by: Yarong Mu
---
 requirements/tpu.txt                 | 12 ++++----
 vllm/v1/attention/backends/pallas.py | 43 ++++++++++++++--------------
 vllm/v1/worker/tpu_model_runner.py   | 11 ++++---
 vllm/v1/worker/tpu_worker.py         |  8 +++---
 4 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/requirements/tpu.txt b/requirements/tpu.txt
index 35d5db6c..1930eacb 100644
--- a/requirements/tpu.txt
+++ b/requirements/tpu.txt
@@ -17,9 +17,9 @@ ray[data]
 --find-links https://storage.googleapis.com/libtpu-releases/index.html
 --find-links https://storage.googleapis.com/jax-releases/jax_nightly_releases.html
 --find-links https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html
-torch @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-2.8.0.dev20250319-cp39-cp39-linux_x86_64.whl ; python_version == "3.9"
-torch @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-2.8.0.dev20250319-cp310-cp310-linux_x86_64.whl ; python_version == "3.10"
-torch @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-2.8.0.dev20250319-cp311-cp311-linux_x86_64.whl ; python_version == "3.11"
-torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250319-cp39-cp39-linux_x86_64.whl ; python_version == "3.9"
-torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250319-cp310-cp310-linux_x86_64.whl ; python_version == "3.10"
-torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250319-cp311-cp311-linux_x86_64.whl ; python_version == "3.11"
+torch @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-2.8.0.dev20250328-cp39-cp39-linux_x86_64.whl ; python_version == "3.9"
+torch @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-2.8.0.dev20250328-cp310-cp310-linux_x86_64.whl ; python_version == "3.10"
+torch @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-2.8.0.dev20250328-cp311-cp311-linux_x86_64.whl ; python_version == "3.11"
+torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250328-cp39-cp39-linux_x86_64.whl ; python_version == "3.9"
+torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250328-cp310-cp310-linux_x86_64.whl ; python_version == "3.10"
+torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250328-cp311-cp311-linux_x86_64.whl ; python_version == "3.11"
diff --git a/vllm/v1/attention/backends/pallas.py b/vllm/v1/attention/backends/pallas.py
index 14d3664d..2f86920e 100644
--- a/vllm/v1/attention/backends/pallas.py
+++ b/vllm/v1/attention/backends/pallas.py
@@ -41,7 +41,7 @@ class PallasAttentionBackend(AttentionBackend):
         num_kv_heads: int,
         head_size: int,
     ) -> tuple[int, ...]:
-        return (num_blocks, block_size, num_kv_heads * head_size)
+        return (num_blocks, block_size, num_kv_heads * 2, head_size)
 
     @staticmethod
     def swap_blocks(
@@ -132,7 +132,7 @@ class PallasAttentionBackendImpl(AttentionImpl):
         query: torch.Tensor,
         key: torch.Tensor,
         value: torch.Tensor,
-        kv_cache: tuple[torch.Tensor, torch.Tensor],
+        kv_cache: torch.Tensor,
         attn_metadata: PallasMetadata,
         output: Optional[torch.Tensor] = None,
     ) -> torch.Tensor:
@@ -142,14 +142,13 @@ class PallasAttentionBackendImpl(AttentionImpl):
             query: shape = [num_tokens, num_heads * head_size]
             key: shape = [num_tokens, num_kv_heads * head_size]
             value: shape = [num_tokens, num_kv_heads * head_size]
-            kv_cache = ([num_blocks, block_size, num_kv_heads * head_size],
-                        [num_blocks, block_size, num_kv_heads * head_size])
+            kv_cache = [num_blocks, block_size, num_kv_heads * 2, head_size]
             attn_metadata: Metadata for attention.
         Returns:
             shape = [num_tokens, num_heads * head_size]
         """
         # For determine_available_memory case.
-        if kv_cache[0].numel() == 0:
+        if kv_cache.numel() == 0:
             if output is None:
                 output = torch.ones_like(query)
             return output
@@ -158,15 +157,13 @@ class PallasAttentionBackendImpl(AttentionImpl):
         num_tokens, hidden_size = query.shape
         query = query.view(num_tokens, self.num_heads, self.head_size)
 
-        key_cache, value_cache = kv_cache
-        if kv_cache[0].numel() > 0:
+        if kv_cache.numel() > 0:
             slot_mapping = attn_metadata.slot_mapping
-            write_to_kv_cache(key, value, key_cache, value_cache, slot_mapping)
+            write_to_kv_cache(key, value, kv_cache, slot_mapping)
 
         output = torch.ops.xla.ragged_paged_attention(
             query,
-            key_cache,
-            value_cache,
+            kv_cache,
             attn_metadata.context_lens,
             attn_metadata.block_tables,
             attn_metadata.query_start_loc,
@@ -183,23 +180,27 @@ class PallasAttentionBackendImpl(AttentionImpl):
 def write_to_kv_cache(
     key: torch.Tensor,
     value: torch.Tensor,
-    key_cache: torch.Tensor,
-    value_cache: torch.Tensor,
+    kv_cache: torch.Tensor,
     slot_mapping: torch.Tensor,
 ) -> None:
     """ Write the key and values to the KV cache.
 
     Args:
         key: shape = [num_tokens, num_kv_heads * head_size]
-        value: shape = [num_tokens, num_kv_heads * head_size]
-        k_cache = [num_blocks, block_size, num_kv_heads * head_size]
-        v_cache = [num_blocks, block_size, num_kv_heads * head_size]
+        value: shape = [num_tokens, num_kv_heads * head_size]
+        kv_cache = [num_blocks, block_size, num_kv_heads * 2, head_size]
 
     """
-    torch.ops.xla.dynamo_set_buffer_donor_(key_cache, True)
-    torch.ops.xla.dynamo_set_buffer_donor_(value_cache, True)
+    _, _, num_combined_kv_heads, head_size = kv_cache.shape
+    num_kv_heads = num_combined_kv_heads // 2
 
-    key_cache = key_cache.flatten(0, 1)
-    value_cache = value_cache.flatten(0, 1)
-    key_cache.index_copy_(0, slot_mapping, key)
-    value_cache.index_copy_(0, slot_mapping, value)
+    key = key.view(-1, num_kv_heads, head_size)
+    value = value.view(-1, num_kv_heads, head_size)
+
+    kv = torch.cat([key, value], axis=-1).reshape(-1, num_combined_kv_heads,
+                                                  head_size)
+
+    torch.ops.xla.dynamo_set_buffer_donor_(kv_cache, True)
+
+    kv_cache = kv_cache.flatten(0, 1)
+    kv_cache.index_copy_(0, slot_mapping, kv)
diff --git a/vllm/v1/worker/tpu_model_runner.py b/vllm/v1/worker/tpu_model_runner.py
index 773cd971..ea5a1701 100644
--- a/vllm/v1/worker/tpu_model_runner.py
+++ b/vllm/v1/worker/tpu_model_runner.py
@@ -861,12 +861,11 @@ class TPUModelRunner:
                     kv_cache_spec.num_kv_heads,
                     kv_cache_spec.head_size)
                 dtype = kv_cache_spec.dtype
-                tpu_k_cache = torch.zeros(kv_cache_shape,
-                                          dtype=dtype,
-                                          device=self.device)
-                tpu_v_cache = torch.zeros_like(tpu_k_cache)
+                tpu_kv_cache = torch.zeros(kv_cache_shape,
+                                           dtype=dtype,
+                                           device=self.device)
 
-                kv_caches[layer_name] = (tpu_k_cache, tpu_v_cache)
+                kv_caches[layer_name] = tpu_kv_cache
             else:
                 raise NotImplementedError
 
@@ -893,7 +892,7 @@ class ModelWrapperV1(nn.Module):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        kv_caches: list[tuple[torch.Tensor, torch.Tensor]],
+        kv_caches: list[torch.Tensor],
         inputs_embeds: Optional[torch.Tensor] = None,
     ) -> torch.Tensor:
         """Executes the forward pass of the model.
diff --git a/vllm/v1/worker/tpu_worker.py b/vllm/v1/worker/tpu_worker.py
index b51bd20f..9add8cee 100644
--- a/vllm/v1/worker/tpu_worker.py
+++ b/vllm/v1/worker/tpu_worker.py
@@ -136,10 +136,10 @@ class TPUWorker:
 
                 # Use an empty tensor instead of `None`` to force Dynamo to pass
                 # it by reference, rather by specializing on the value ``None``.
-                tpu_k_cache = torch.tensor([], dtype=dtype, device=self.device)
-                tpu_v_cache = torch.tensor([], dtype=dtype, device=self.device)
-
-                kv_caches[layer_name] = (tpu_k_cache, tpu_v_cache)
+                tpu_kv_cache = torch.tensor([],
+                                            dtype=dtype,
+                                            device=self.device)
+                kv_caches[layer_name] = tpu_kv_cache
             else:
                 raise NotImplementedError
 
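For reference (not part of the patch): a minimal, CPU-only PyTorch sketch of the combined
KV-cache layout and write path this change introduces. It mirrors the reshape and
index_copy_ logic of the new write_to_kv_cache, but the tensor sizes, slot_mapping values,
and the flat_cache name are made-up examples, and the torch_xla buffer-donor call and the
ragged_paged_attention kernel are omitted.

import torch

# Illustrative sizes only.
num_blocks, block_size, num_kv_heads, head_size = 4, 16, 2, 8
num_tokens = 3

# New layout: K and V are interleaved per head along dim 2,
# i.e. [num_blocks, block_size, num_kv_heads * 2, head_size].
kv_cache = torch.zeros(num_blocks, block_size, num_kv_heads * 2, head_size)

key = torch.randn(num_tokens, num_kv_heads * head_size)
value = torch.randn(num_tokens, num_kv_heads * head_size)
# Flat indices into the num_blocks * block_size token slots (example values).
slot_mapping = torch.tensor([0, 1, 17])

# Same reshaping as the new write_to_kv_cache: concatenating along the last
# dim and reshaping yields [k_h0, v_h0, k_h1, v_h1, ...] per token.
k = key.view(-1, num_kv_heads, head_size)
v = value.view(-1, num_kv_heads, head_size)
kv = torch.cat([k, v], dim=-1).reshape(-1, num_kv_heads * 2, head_size)

flat_cache = kv_cache.flatten(0, 1)          # view: [num_blocks * block_size, 2H, D]
flat_cache.index_copy_(0, slot_mapping, kv)  # one scatter instead of two

# Token 0 went to block 0, slot 0; its head-0 K and V sit at combined-head
# indices 0 and 1, respectively.
assert torch.allclose(kv_cache[0, 0, 0], k[0, 0])
assert torch.allclose(kv_cache[0, 0, 1], v[0, 0])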