# vllm/tests/kernels/test_activation.py
from typing import Type

import pytest
import torch

from vllm.model_executor.layers.activation import (FastGELU, GeluAndMul,
                                                   NewGELU, SiluAndMul)

from .allclose_default import get_default_atol, get_default_rtol

DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [7, 83, 2048]  # Arbitrary values for testing
D = [512, 4096, 5120, 13824]  # Arbitrary values for testing
SEEDS = [0]
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]
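# For context: the *AndMul layers implement the "gated" activation used in
# LLaMA-style MLPs. The input of shape (num_tokens, 2 * d) is split in half,
# the activation is applied to the first half, and the result is multiplied
# elementwise by the second half, yielding (num_tokens, d). A minimal PyTorch
# sketch of the assumed reference behavior (for illustration only, not
# vLLM's actual implementation):
#
#     def silu_and_mul_ref(x: torch.Tensor) -> torch.Tensor:
#         d = x.shape[-1] // 2
#         return torch.nn.functional.silu(x[..., :d]) * x[..., d:]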
@pytest.mark.parametrize("activation", ["silu", "gelu", "gelu_tanh"])
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_act_and_mul(
    activation: str,
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    # The *AndMul layers take an input of shape (num_tokens, 2 * d) and
    # produce an output of shape (num_tokens, d).
    x = torch.randn(num_tokens, 2 * d, dtype=dtype)
    if activation == "silu":
        layer = SiluAndMul()
    elif activation == "gelu":
        layer = GeluAndMul(approximate="none")
    elif activation == "gelu_tanh":
        layer = GeluAndMul(approximate="tanh")
    else:
        raise NotImplementedError(f"Unknown activation: {activation}")
    # `layer(x)` runs the custom CUDA kernel; `layer._forward(x)` is the
    # native PyTorch reference implementation.
    out = layer(x)
    ref_out = layer._forward(x)
    # The SiLU and GELU implementations are equivalent to the native PyTorch
    # implementations, so we can do an exact comparison.
    assert torch.allclose(out, ref_out, atol=0.0, rtol=0.0)


@pytest.mark.parametrize("activation", [FastGELU, NewGELU])
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_activation(
    activation: Type[torch.nn.Module],
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    x = torch.randn(num_tokens, d, dtype=dtype)
    layer = activation()
    out = layer(x)
    ref_out = layer._forward(x)
    # The kernel and the native PyTorch implementation can differ slightly in
    # floating point, so compare within dtype-dependent default tolerances.
    assert torch.allclose(out,
                          ref_out,
                          atol=get_default_atol(out),
                          rtol=get_default_rtol(out))
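

# For reference, FastGELU and NewGELU are tanh-based GELU approximations; the
# commonly used formula (stated here for illustration, not taken from vLLM's
# source) is
#
#     0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))
#
# To run just this file (assuming a CUDA build of vLLM with its test
# dependencies installed):
#
#     pytest tests/kernels/test_activation.py -v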