from unittest.mock import patch

import pytest
import torch

from tests.kernels.utils import (STR_FLASH_ATTN_VAL, STR_INVALID_VAL,
                                 override_backend_env_variable)
from vllm.attention.selector import which_attn_to_use
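
# Note: which_attn_to_use returns a backend enum; the tests below compare its
# .name attribute against the backend forced through the environment variable.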


@pytest.mark.parametrize(
    "name", ["TORCH_SDPA", "ROCM_FLASH", "XFORMERS", "FLASHINFER", "OPENVINO"])
@pytest.mark.parametrize("device", ["cpu", "openvino", "hip", "cuda"])
def test_env(name: str, device: str, monkeypatch):
    """Test that the attention selector can be set via environment variable.

    Note that we do not test FlashAttn because it is the default backend.
    """
    override_backend_env_variable(monkeypatch, name)
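
    # The positional arguments passed to which_attn_to_use below appear to be
    # (num_heads, head_size, num_kv_heads, sliding_window, dtype,
    # kv_cache_dtype, block_size); this mapping is inferred from how the cases
    # in this file vary them, not from a documented signature.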
    if device == "cpu":
        with patch("vllm.attention.selector.is_cpu", return_value=True):
            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                        torch.float16, 16)
        assert backend.name == "TORCH_SDPA"
    elif device == "hip":
        with patch("vllm.attention.selector.is_hip", return_value=True):
            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                        torch.float16, 16)
        assert backend.name == "ROCM_FLASH"
    elif device == "openvino":
        with patch("vllm.attention.selector.is_openvino", return_value=True):
            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                        torch.float16, 16)
        assert backend.name == "OPENVINO"
    else:
        backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                    torch.float16, 16)
        assert backend.name == name


def test_flash_attn(monkeypatch):
    """Test FlashAttn validation."""
    override_backend_env_variable(monkeypatch, STR_FLASH_ATTN_VAL)
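
    # FlashAttention needs a reasonably recent GPU; a (7, 5) (Turing) compute
    # capability is below what the FLASH_ATTN backend accepts, so the selector
    # should fall back to another backend here.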
    # Unsupported CUDA arch
    with patch("torch.cuda.get_device_capability", return_value=[7, 5]):
        backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)
        assert backend.name != "FLASH_ATTN"
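
    # flash-attn kernels operate on fp16/bf16 activations, so an fp8 query
    # dtype should likewise steer the selector away from FLASH_ATTN.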
    # Unsupported data type
    backend = which_attn_to_use(8, 16, 8, None, torch.float8_e4m3fn, None, 16)
    assert backend.name != "FLASH_ATTN"

    # Unsupported kv cache data type
    backend = which_attn_to_use(8, 16, 8, None, torch.float16, "fp8", 16)
    assert backend.name != "FLASH_ATTN"

    # Unsupported block size
    backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 8)
    assert backend.name != "FLASH_ATTN"

    # Unsupported sliding window
    backend = which_attn_to_use(8, 16, 8, 1, torch.float16, None, 16)
    assert backend.name != "FLASH_ATTN"
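
    # patch.dict('sys.modules', {'vllm_flash_attn': None}) makes any
    # "import vllm_flash_attn" inside the selector raise ImportError, which
    # simulates the package not being installed.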
    # flash-attn is not installed
    with patch.dict('sys.modules', {'vllm_flash_attn': None}):
        backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)
        assert backend.name != "FLASH_ATTN"
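
    # The FLASH_ATTN kernels only support a fixed set of head sizes; 17 is not
    # one of them, so the selector should pick a different backend.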
    # Unsupported head size
    backend = which_attn_to_use(8, 17, 8, None, torch.float16, None, 16)
    assert backend.name != "FLASH_ATTN"


def test_invalid_env(monkeypatch):
    """Throw an exception if the backend name is invalid."""
    override_backend_env_variable(monkeypatch, STR_INVALID_VAL)
    with pytest.raises(ValueError):
        which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)