vllm/setup.py

68 lines
2.2 KiB
Python
Raw Normal View History

2023-02-16 07:47:03 +00:00
import setuptools
2023-05-03 14:09:44 -07:00
import torch
2023-02-16 07:47:03 +00:00
from torch.utils import cpp_extension
# Compiler flags for the C++ host code (cxx) and CUDA device code (nvcc).
CXX_FLAGS = ['-g']
NVCC_FLAGS = ['-O2']

# The CUDA toolkit must be installed to compile the .cu kernels.
# NOTE: the original code raised this CUDA_HOME message while actually
# testing torch.cuda.is_available(); the check and message now agree.
if cpp_extension.CUDA_HOME is None:
    raise RuntimeError(
        f'Cannot find CUDA at CUDA_HOME: {cpp_extension.CUDA_HOME}. '
        'CUDA must be available in order to build the package.')
# A visible GPU is also required so we can query its compute capability.
if not torch.cuda.is_available():
    raise RuntimeError(
        'No CUDA device is available. '
        'CUDA must be available in order to build the package.')

# Enable bfloat16 support only when *every* visible GPU has compute
# capability >= 8.0 (Ampere or newer).  Checking all devices instead of
# only device 0 handles machines with heterogeneous GPUs (the former
# FIXME); on a single-GPU machine this reduces to the old behavior.
if all(torch.cuda.get_device_capability(device)[0] >= 8
       for device in range(torch.cuda.device_count())):
    NVCC_FLAGS.append('-DENABLE_BF16')
2023-02-16 07:47:03 +00:00
# Each entry is (fully-qualified module name, list of source files).
# All extensions are compiled with the same CXX_FLAGS / NVCC_FLAGS, so
# declaring them as data removes five copies of the same boilerplate.
_EXTENSION_SPECS = [
    # Cache (KV-cache) operations.
    ('cacheflow.cache_ops',
     ['csrc/cache.cpp', 'csrc/cache_kernels.cu']),
    # Attention kernels.
    ('cacheflow.attention_ops',
     ['csrc/attention.cpp', 'csrc/attention/attention_kernels.cu']),
    # Positional encoding kernels.
    ('cacheflow.pos_encoding_ops',
     ['csrc/pos_encoding.cpp', 'csrc/pos_encoding_kernels.cu']),
    # Layer normalization kernels.
    ('cacheflow.layernorm_ops',
     ['csrc/layernorm.cpp', 'csrc/layernorm_kernels.cu']),
    # Activation kernels.
    ('cacheflow.activation_ops',
     ['csrc/activation.cpp', 'csrc/activation_kernels.cu']),
]

# Build the CUDAExtension objects in declaration order, matching the
# original one-by-one construction exactly.
ext_modules = [
    cpp_extension.CUDAExtension(
        name=ext_name,
        sources=ext_sources,
        extra_compile_args={'cxx': CXX_FLAGS, 'nvcc': NVCC_FLAGS},
    )
    for ext_name, ext_sources in _EXTENSION_SPECS
]
2023-02-16 07:47:03 +00:00
# Package definition.  PyTorch's BuildExtension replaces the default
# build_ext command so that .cu sources are routed to nvcc and .cpp
# sources to the host compiler.
setup_kwargs = {
    'name': 'cacheflow',
    'ext_modules': ext_modules,
    'cmdclass': {'build_ext': cpp_extension.BuildExtension},
}
setuptools.setup(**setup_kwargs)