#include <torch/extension.h>
#include <c10/util/Optional.h>

// PagedAttention V1: compute attention for the current query tokens against
// keys/values stored in a paged (block-structured) KV cache.
void paged_attention_v1(
  torch::Tensor& out,           // attention output, written in place
  torch::Tensor& query,         // query tensor for the current decode step
  torch::Tensor& key_cache,     // paged key cache
  torch::Tensor& value_cache,   // paged value cache
  torch::Tensor& head_mapping,  // maps each query head to its KV head (MQA/GQA)
  float scale,                  // softmax scale, typically 1/sqrt(head_size)
  torch::Tensor& block_tables,  // per-sequence lists of cache block numbers
  torch::Tensor& context_lens,  // context length of each sequence
  int block_size,               // number of tokens held by one cache block
  int max_context_len,          // maximum context length in the batch
  const c10::optional<torch::Tensor>& alibi_slopes);  // optional per-head ALiBi slopes

// PagedAttention V2: the same computation as V1, but long sequences are split
// into partitions; exp_sums, max_logits, and tmp_out hold the per-partition
// partial results that the final reduction combines into `out`.
void paged_attention_v2(
  torch::Tensor& out,           // final attention output after the reduction
  torch::Tensor& exp_sums,      // per-partition softmax normalizers
  torch::Tensor& max_logits,    // per-partition maximum logits (for stable softmax)
  torch::Tensor& tmp_out,       // per-partition attention outputs
  torch::Tensor& query,         // remaining parameters match paged_attention_v1
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  torch::Tensor& head_mapping,
  float scale,
  torch::Tensor& block_tables,
  torch::Tensor& context_lens,
  int block_size,
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes);
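
// The V2 entry point needs scratch buffers sized by the number of partitions.
// A minimal sketch of allocating them, assuming a fixed partition length
// (512 tokens here) and float32 softmax statistics; both are assumptions
// about the kernels, not guarantees made by this file.
inline std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> make_v2_scratch(
    int num_seqs, int num_heads, int head_size, int max_context_len,
    const torch::TensorOptions& opts) {
  const int partition_size = 512;  // assumed partition length in tokens
  const int max_num_partitions =
      (max_context_len + partition_size - 1) / partition_size;
  // Softmax statistics kept in float32 for numerical stability (assumed).
  auto exp_sums = torch::empty({num_seqs, num_heads, max_num_partitions},
                               opts.dtype(torch::kFloat32));
  auto max_logits = torch::empty_like(exp_sums);
  // Per-partition outputs in the model dtype, reduced into `out` by the kernel.
  auto tmp_out = torch::empty(
      {num_seqs, num_heads, max_num_partitions, head_size}, opts);
  return {exp_sums, max_logits, tmp_out};
}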

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def(
    "paged_attention_v1",
    &paged_attention_v1,
    "Compute the attention between an input query and the cached keys/values using PagedAttention.");
  m.def(
    "paged_attention_v2",
    &paged_attention_v2,
    "Compute the attention using PagedAttention V2, which splits long contexts into partitions and reduces the partial results.");
}
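
// A minimal usage sketch of paged_attention_v1 from C++, assuming the kernels
// are linked in and run on CUDA half-precision tensors. Every shape and the
// key/value cache layouts below are illustrative assumptions, not layouts
// this file promises.
void paged_attention_v1_example() {
  const int num_seqs = 2, num_heads = 8, head_size = 64;
  const int num_blocks = 32, block_size = 16, max_context_len = 48;
  const int max_num_blocks_per_seq =
      (max_context_len + block_size - 1) / block_size;
  const int x = 16 / sizeof(at::Half);  // assumed key-cache vectorization width

  auto opts = torch::dtype(torch::kFloat16).device(torch::kCUDA);
  auto out = torch::empty({num_seqs, num_heads, head_size}, opts);
  auto query = torch::randn({num_seqs, num_heads, head_size}, opts);
  // Assumed paged layouts: keys tiled by `x` along the head dimension.
  auto key_cache = torch::randn(
      {num_blocks, num_heads, head_size / x, block_size, x}, opts);
  auto value_cache = torch::randn(
      {num_blocks, num_heads, head_size, block_size}, opts);

  auto int_opts = torch::dtype(torch::kInt32).device(torch::kCUDA);
  // Identity mapping: each query head reads its own KV head (no MQA/GQA).
  auto head_mapping = torch::arange(num_heads, int_opts);
  auto block_tables = torch::randint(
      num_blocks, {num_seqs, max_num_blocks_per_seq}, int_opts);
  auto context_lens = torch::full({num_seqs}, max_context_len, int_opts);
  const float scale = 0.125f;  // 1/sqrt(head_size) for head_size = 64

  paged_attention_v1(out, query, key_cache, value_cache, head_mapping, scale,
                     block_tables, context_lens, block_size, max_context_len,
                     /*alibi_slopes=*/c10::nullopt);
}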