import random

from typing import List, Tuple

import pytest
import torch

from vllm import _custom_ops as ops

COPYING_DIRECTION = [('cuda', 'cpu'), ('cuda', 'cuda'), ('cpu', 'cuda')]
DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [42]  # Arbitrary values for testing
NUM_LAYERS = [1]  # Arbitrary values for testing
NUM_HEADS = [8]  # Arbitrary values for testing
HEAD_SIZES = [64, 80, 96, 112, 128, 192, 256]
BLOCK_SIZES = [8, 16, 32]
# Arbitrary values for testing
# don't make it too large. e.g. [1024, 36000] will OOM
NUM_BLOCKS = [1024, 10000]
NUM_MAPPINGS = [256]  # Arbitrary values for testing
SEEDS = [0]
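# Run on up to two CUDA devices (cuda:0 and, when available, cuda:1) so a
# non-default device index is also exercised.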
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]

# We assume fp8 is always enabled for testing.
KV_CACHE_DTYPE = ["auto", "fp8"]


@pytest.mark.parametrize("num_mappings", NUM_MAPPINGS)
@pytest.mark.parametrize("num_layers", NUM_LAYERS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE)
@torch.inference_mode()
def test_copy_blocks(
    kv_cache_factory,
    num_mappings: int,
    num_layers: int,
    num_heads: int,
    head_size: int,
    block_size: int,
    num_blocks: int,
    dtype: torch.dtype,
    seed: int,
    kv_cache_dtype: str,
    device: str,
) -> None:
    random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    # Generate random block mappings where each source block is mapped to two
    # destination blocks.
    assert 2 * num_mappings <= num_blocks
    src_blocks = random.sample(range(num_blocks), num_mappings)
    remaining_blocks = list(set(range(num_blocks)) - set(src_blocks))
    dst_blocks = random.sample(remaining_blocks, 2 * num_mappings)
    block_mapping: List[Tuple[int, int]] = []
    for i in range(num_mappings):
        src = src_blocks[i]
        dst1 = dst_blocks[2 * i]
        dst2 = dst_blocks[2 * i + 1]
        block_mapping.append((src, dst1))
        block_mapping.append((src, dst2))

    # Create the KV caches.
    key_caches, value_caches = kv_cache_factory(num_blocks, block_size,
                                                num_layers, num_heads,
                                                head_size, kv_cache_dtype,
                                                dtype, seed, device)

    # Clone the KV caches.
    cloned_key_caches = [key_cache.clone() for key_cache in key_caches]
    cloned_value_caches = [value_cache.clone() for value_cache in value_caches]

    # Call the copy blocks kernel.
    block_mapping_tensor = torch.tensor(block_mapping,
                                        dtype=torch.int64,
                                        device=device).view(-1, 2)
    ops.copy_blocks(key_caches, value_caches, block_mapping_tensor)
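
    # NOTE: copy_blocks updates key_caches and value_caches in place; the
    # clones above still hold the pre-copy contents used by the reference.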
    # Run the reference implementation.
    for src, dst in block_mapping:
        for cloned_key_cache in cloned_key_caches:
            cloned_key_cache[dst].copy_(cloned_key_cache[src])
        for cloned_value_cache in cloned_value_caches:
            cloned_value_cache[dst].copy_(cloned_value_cache[src])

    # Compare the results.
    for key_cache, cloned_key_cache in zip(key_caches, cloned_key_caches):
        assert torch.allclose(key_cache, cloned_key_cache)
    for value_cache, cloned_value_cache in zip(value_caches,
                                               cloned_value_caches):
        assert torch.allclose(value_cache, cloned_value_cache)


@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE)
@torch.inference_mode()
def test_reshape_and_cache(
    kv_cache_factory,
    num_tokens: int,
    num_heads: int,
    head_size: int,
    block_size: int,
    num_blocks: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
    kv_cache_dtype: str,
) -> None:
    random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    # Create a random slot mapping.
    num_slots = block_size * num_blocks
    slot_mapping_lst = random.sample(range(num_slots), num_tokens)
    slot_mapping = torch.tensor(slot_mapping_lst, dtype=torch.long)

    qkv = torch.randn(num_tokens, 3, num_heads, head_size, dtype=dtype)
    _, key, value = qkv.unbind(dim=1)

    # Create the KV caches.
    key_caches, value_caches = kv_cache_factory(num_blocks, block_size, 1,
                                                num_heads, head_size,
                                                kv_cache_dtype, dtype, seed,
                                                device)
    key_cache, value_cache = key_caches[0], value_caches[0]

    # Clone the KV caches.
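    # An fp8 cache is stored as uint8, so it is first converted to fp16; the
    # reference update and the final comparison then run in fp16.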
    if kv_cache_dtype == "fp8":
        cloned_key_cache = torch.empty_like(key_cache, dtype=torch.float16)
        ops.convert_fp8(cloned_key_cache, key_cache)
        cloned_value_cache = torch.empty_like(value_cache, dtype=torch.float16)
        ops.convert_fp8(cloned_value_cache, value_cache)
    else:
        cloned_key_cache = key_cache.clone()
        cloned_value_cache = value_cache.clone()

    # Using default kv_scale
    kv_scale = 1.0

    # Call the reshape_and_cache kernel.
    ops.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping,
                          kv_cache_dtype, kv_scale)
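
    # Dequantize the fp8 results back to fp16 so they can be compared against
    # the fp16 reference below.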
    if kv_cache_dtype == "fp8":
        result_key_cache = torch.empty_like(key_cache, dtype=torch.float16)
        ops.convert_fp8(result_key_cache, key_cache)
        result_value_cache = torch.empty_like(value_cache, dtype=torch.float16)
        ops.convert_fp8(result_value_cache, value_cache)

    # Run the reference implementation.
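    # The key cache is indexed as [num_blocks, num_heads, head_size // x,
    # block_size, x] (x presumably being a packing factor of the cache
    # layout), so the key is reshaped to the shape of a single cache slot.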
    reshaped_key = key.reshape(num_tokens, *key_cache[0, :, :, 0, :].shape)
    block_indices = torch.div(slot_mapping, block_size, rounding_mode="floor")
    block_indices_lst = block_indices.cpu().tolist()
    block_offsets = slot_mapping % block_size
    block_offsets_lst = block_offsets.cpu().tolist()
    for i in range(num_tokens):
        block_idx = block_indices_lst[i]
        block_offset = block_offsets_lst[i]
        cloned_key_cache[block_idx, :, :, block_offset, :] = reshaped_key[i]
        cloned_value_cache[block_idx, :, :, block_offset] = value[i]
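
    # fp8 values round-trip through quantization, so a loose tolerance is used
    # when comparing against the fp16 reference; the non-fp8 path must match
    # within torch.allclose's defaults.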
    if kv_cache_dtype == "fp8":
        assert torch.allclose(result_key_cache,
                              cloned_key_cache,
                              atol=0.001,
                              rtol=0.1)
        assert torch.allclose(result_value_cache,
                              cloned_value_cache,
                              atol=0.001,
                              rtol=0.1)
    else:
        assert torch.allclose(key_cache, cloned_key_cache)
        assert torch.allclose(value_cache, cloned_value_cache)


@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE)
@torch.inference_mode()
def test_reshape_and_cache_flash(
    kv_cache_factory_flashinfer,
    num_tokens: int,
    num_heads: int,
    head_size: int,
    block_size: int,
    num_blocks: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
    kv_cache_dtype: str,
) -> None:
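    # Only the non-fp8 ("auto") cache dtype is exercised for the flash layout.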
    if kv_cache_dtype == "fp8":
        pytest.skip()
    random.seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.set_default_device(device)

    # Create a random slot mapping.
    num_slots = block_size * num_blocks
    slot_mapping_lst = random.sample(range(num_slots), num_tokens)
    slot_mapping = torch.tensor(slot_mapping_lst,
                                dtype=torch.long,
                                device=device)

    qkv = torch.randn(num_tokens,
                      3,
                      num_heads,
                      head_size,
                      dtype=dtype,
                      device=device)
    _, key, value = qkv.unbind(dim=1)

    # Create the KV caches.
    key_caches, value_caches = kv_cache_factory_flashinfer(
        num_blocks,
        block_size,
        1,
        num_heads,
        head_size,
        kv_cache_dtype,
        dtype,
        device=device,
    )
    key_cache, value_cache = key_caches[0], value_caches[0]

    # Clone the KV caches.
    cloned_key_cache = key_cache.clone()
    cloned_value_cache = value_cache.clone()

    # Call the reshape_and_cache kernel.
    ops.reshape_and_cache_flash(key, value, key_cache, value_cache,
                                slot_mapping, kv_cache_dtype)

    # Run the reference implementation.
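    # The flash cache layout is [num_blocks, block_size, num_heads, head_size],
    # so each token is written directly at its (block, offset) position.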
    block_indices = torch.div(slot_mapping, block_size, rounding_mode="floor")
    block_indices_lst = block_indices.cpu().tolist()
    block_offsets = slot_mapping % block_size
    block_offsets_lst = block_offsets.cpu().tolist()
    for i in range(num_tokens):
        block_idx = block_indices_lst[i]
        block_offset = block_offsets_lst[i]
        cloned_key_cache[block_idx, block_offset, :, :] = key[i]
        cloned_value_cache[block_idx, block_offset, :, :] = value[i]

    assert torch.allclose(key_cache, cloned_key_cache)
    assert torch.allclose(value_cache, cloned_value_cache)


@pytest.mark.parametrize("direction", COPYING_DIRECTION)
@pytest.mark.parametrize("num_mappings", NUM_MAPPINGS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE)
@torch.inference_mode()
def test_swap_blocks(
    kv_cache_factory,
    direction: Tuple[str, str],
    num_mappings: int,
    num_heads: int,
    head_size: int,
    block_size: int,
    num_blocks: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
    kv_cache_dtype: str,
) -> None:
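    # Swapping fp8 caches between CPU and GPU is not covered; only
    # CUDA-to-CUDA swaps run with kv_cache_dtype == "fp8".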
    if kv_cache_dtype == "fp8" and "cpu" in direction:
        pytest.skip()
    random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    src_device = device if direction[0] == "cuda" else 'cpu'
    dst_device = device if direction[1] == "cuda" else 'cpu'

    src_blocks = random.sample(range(num_blocks), num_mappings)
    # For the same device, the mapping must not overlap.
    if src_device == dst_device:
        remaining_blocks = list(set(range(num_blocks)) - set(src_blocks))
        dst_blocks = random.sample(remaining_blocks, num_mappings)
    else:
        dst_blocks = random.sample(range(num_blocks), num_mappings)

    block_mapping = list(zip(src_blocks, dst_blocks))
    block_mapping_tensor = torch.tensor(block_mapping,
                                        dtype=torch.int64,
                                        device="cpu").view(-1, 2)

    # Create the KV caches on the first device.
    src_key_caches, src_value_caches = kv_cache_factory(
        num_blocks, block_size, 1, num_heads, head_size, kv_cache_dtype, dtype,
        seed, src_device)

    # Create the KV caches on the second device.
    dist_key_caches, dist_value_caches = kv_cache_factory(
        num_blocks, block_size, 1, num_heads, head_size, kv_cache_dtype, dtype,
        seed, dst_device)

    src_key_caches_clone = src_key_caches[0].clone()
    src_value_caches_clone = src_value_caches[0].clone()

    # Call the swap_blocks kernel.
    ops.swap_blocks(src_key_caches[0], dist_key_caches[0],
                    block_mapping_tensor)
    ops.swap_blocks(src_value_caches[0], dist_value_caches[0],
                    block_mapping_tensor)

    # Compare the swapped blocks against the pre-swap source contents.
    for src, dst in block_mapping:
        assert torch.allclose(src_key_caches_clone[src].cpu(),
                              dist_key_caches[0][dst].cpu())
        assert torch.allclose(src_value_caches_clone[src].cpu(),
                              dist_value_caches[0][dst].cpu())


@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_fp8_e4m3_conversion(
    num_heads: int,
    head_size: int,
    block_size: int,
    num_blocks: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    random.seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
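
    # +/-224 keeps the sampled values comfortably inside the representable
    # range of the fp8 E4M3 variants, so the round-trip error stays small.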
    low = -224.0
    high = 224.0
    shape = (num_blocks, num_heads, head_size, block_size)
    cache = torch.empty(shape, dtype=dtype, device=device)
    cache.uniform_(low, high)

    cache_fp8 = torch.empty_like(cache, dtype=torch.uint8)
    ops.convert_fp8(cache_fp8, cache)

    converted_cache = torch.empty_like(cache)
    ops.convert_fp8(converted_cache, cache_fp8)

    assert torch.allclose(cache, converted_cache, atol=0.001, rtol=0.1)