vllm/csrc/attention/attention_utils.cuh


/*
* Adapted from
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
* Copyright (c) 2023, The vLLM team.
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
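// cuda_compat.h provides VLLM_SHFL_XOR_SYNC, a warp-shuffle wrapper that maps
// to __shfl_xor_sync on CUDA and __shfl_xor on ROCm.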
#include "../cuda_compat.h"
#include "attention_dtypes.h"
#include <float.h>
#include <type_traits>
namespace vllm {
// Q*K^T operation.
template <int THREAD_GROUP_SIZE, typename Vec, int N>
inline __device__ float qk_dot_(const Vec (&q)[N], const Vec (&k)[N]) {
  using A_vec = typename FloatVec<Vec>::Type;
  // Compute the parallel products for Q*K^T (treat vector lanes separately).
  A_vec qk_vec = mul<A_vec, Vec, Vec>(q[0], k[0]);
#pragma unroll
  for (int ii = 1; ii < N; ++ii) {
    qk_vec = fma(q[ii], k[ii], qk_vec);
  }

  // Finalize the reduction: sum across the vector lanes within this thread,
  // then across the THREAD_GROUP_SIZE threads via warp shuffles.
  float qk = sum(qk_vec);
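  // Example: with THREAD_GROUP_SIZE = 4, the loop below runs masks 2 and 1.
  // After mask 2, lane i holds qk_i + qk_(i^2); after mask 1, every lane in
  // the group holds qk_0 + qk_1 + qk_2 + qk_3 (assuming groups start at lane
  // indices that are multiples of THREAD_GROUP_SIZE).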
#pragma unroll
  for (int mask = THREAD_GROUP_SIZE / 2; mask >= 1; mask /= 2) {
    qk += VLLM_SHFL_XOR_SYNC(qk, mask);
  }
  return qk;
}
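
// Entry point for the Q*K^T dot product: the generic template simply defers
// to qk_dot_ above. Keying the struct on the scalar type T leaves room for
// dtype-specific specializations (FasterTransformer, which this file is
// adapted from, specializes the half path, for example).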
template <typename T, int THREAD_GROUP_SIZE>
struct Qk_dot {
  template <typename Vec, int N>
  static inline __device__ float dot(const Vec (&q)[N], const Vec (&k)[N]) {
    return qk_dot_<THREAD_GROUP_SIZE>(q, k);
  }
};
} // namespace vllm
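
// Usage sketch (illustrative; example_qk_dot is a hypothetical helper, not
// part of vLLM). It assumes each thread of a 4-lane group holds its slice of
// a head's query and key as two float4 register vectors; Qk_dot then returns
// the full 32-element dot product, replicated across the group's lanes.
inline __device__ float example_qk_dot(const float4 (&q)[2],
                                       const float4 (&k)[2]) {
  // T = float selects the generic path; THREAD_GROUP_SIZE = 4 reduces over
  // four consecutive lanes.
  return vllm::Qk_dot<float, 4>::dot(q, k);
}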