# vllm/tests/kernels/test_layernorm.py

import pytest
import torch
from vllm.model_executor.layers.layernorm import RMSNorm

DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [7, 83, 4096] # Arbitrary values for testing
HIDDEN_SIZES = [768, 5120, 8192] # Arbitrary values for testing
ADD_RESIDUAL = [False, True]
SEEDS = [0]


@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
@pytest.mark.parametrize("add_residual", ADD_RESIDUAL)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@torch.inference_mode()
def test_rms_norm(
    num_tokens: int,
    hidden_size: int,
    add_residual: bool,
    dtype: torch.dtype,
    seed: int,
) -> None:
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    layer = RMSNorm(hidden_size).to(dtype).cuda()
    layer.weight.data.normal_(mean=1.0, std=0.1)
    # Keep the inputs small: large activations would amplify rounding error
    # in the low-precision reduction and make the tolerance check flaky.
    scale = 1 / (2 * hidden_size)
    x = torch.randn(num_tokens, hidden_size, dtype=dtype, device="cuda")
    x *= scale
    residual = torch.randn_like(x) * scale if add_residual else None
    # NOTE(woosuk): The reference implementation should be executed first
    # because the custom kernel is in-place.
    ref_out = layer._forward(x, residual)
    out = layer(x, residual)
    # NOTE(woosuk): LayerNorm operators (including RMS) typically have larger
    # numerical errors than other operators because they involve reductions.
    # Therefore, we use a larger tolerance.
    if add_residual:
        assert torch.allclose(out[0], ref_out[0], atol=1e-2, rtol=1e-2)
        assert torch.allclose(out[1], ref_out[1], atol=1e-2, rtol=1e-2)
    else:
        assert torch.allclose(out, ref_out, atol=1e-2, rtol=1e-2)
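

# For orientation, a minimal sketch of the math the reference path is expected
# to compute, assuming the standard RMSNorm definition and the layer's default
# eps of 1e-6 (the real reference is RMSNorm._forward; the helper name below is
# illustrative only, not part of vllm):
#
#   out = x * rsqrt(mean(x**2, dim=-1) + eps) * weight
#
# With add_residual=True, x + residual is what gets normalized, and the sum is
# also returned as the updated residual.
def _rms_norm_sketch(x: torch.Tensor,
                     weight: torch.Tensor,
                     eps: float = 1e-6) -> torch.Tensor:
    # Compute in float32 (assumed to match the reference's upcast) so the
    # sketch itself avoids the low-precision reduction error the test tolerates.
    x32 = x.to(torch.float32)
    variance = x32.pow(2).mean(dim=-1, keepdim=True)
    return (x32 * torch.rsqrt(variance + eps)).to(x.dtype) * weight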