#pragma once

#include <optional>
#include <vector>

#include <torch/library.h>

#include "core/scalar_type.hpp"

torch::Tensor weak_ref_tensor(torch::Tensor& tensor) {
  // Ensure the tensor lives on a CUDA device.
  if (!tensor.is_cuda()) {
    throw std::runtime_error("Tensor must be on CUDA device");
  }

  // Capture the raw data pointer plus the sizes and strides that describe
  // the same view of that memory.
  void* data_ptr = tensor.data_ptr();
  std::vector<int64_t> sizes = tensor.sizes().vec();
  std::vector<int64_t> strides = tensor.strides().vec();

  // Reuse the source tensor's options (dtype, device, layout).
  auto options = tensor.options();

  // Wrap the existing allocation in a new tensor; from_blob does not take
  // ownership, so the result is a weak (non-owning) reference.
  auto new_tensor = torch::from_blob(data_ptr, sizes, strides, options);
  return new_tensor;
}
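
// Note: weak_ref_tensor (above) returns a non-owning alias. torch::from_blob
// neither copies nor retains the underlying storage, so the result is only
// valid while the source tensor stays alive. Illustrative sketch with a
// hypothetical caller (not part of this header):
//   torch::Tensor t = torch::zeros({8, 8}, torch::kCUDA);
//   torch::Tensor v = weak_ref_tensor(t);  // shares t's memory, no refcount
//   // v must not be used after t is destroyed.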

void paged_attention_v1(
    torch::Tensor& out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const std::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, torch::Tensor& k_scale,
    torch::Tensor& v_scale, const int64_t tp_rank,
    const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);
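
// paged_attention_v2 differs from v1 by processing long sequences in
// partitions: exp_sums, max_logits, and tmp_out hold per-partition softmax
// statistics and partial outputs that are then reduced into `out` (an
// informal summary; see the kernel sources for the exact scheme).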

void paged_attention_v2(
    torch::Tensor& out, torch::Tensor& exp_sums, torch::Tensor& max_logits,
    torch::Tensor& tmp_out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const std::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, torch::Tensor& k_scale,
    torch::Tensor& v_scale, const int64_t tp_rank,
    const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);

void rms_norm(torch::Tensor& out, torch::Tensor& input, torch::Tensor& weight,
              double epsilon);

void fused_add_rms_norm(torch::Tensor& input, torch::Tensor& residual,
                        torch::Tensor& weight, double epsilon);
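
// Both kernels compute RMSNorm, roughly
//   out = input / sqrt(mean(input^2, last_dim) + epsilon) * weight,
// and fused_add_rms_norm first accumulates `residual` into `input` in place
// before normalizing (an informal summary, not the exact kernel code).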

void rms_norm_static_fp8_quant(torch::Tensor& out, torch::Tensor& input,
                               torch::Tensor& weight, torch::Tensor& scale,
                               double epsilon);

void fused_add_rms_norm_static_fp8_quant(torch::Tensor& out,
                                         torch::Tensor& input,
                                         torch::Tensor& residual,
                                         torch::Tensor& weight,
                                         torch::Tensor& scale, double epsilon);

void rms_norm_dynamic_per_token_quant(torch::Tensor& out,
                                      torch::Tensor const& input,
                                      torch::Tensor const& weight,
                                      torch::Tensor& scales,
                                      double const epsilon,
                                      std::optional<torch::Tensor> scale_ub,
                                      std::optional<torch::Tensor> residual);
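
// The *_quant variants above fuse the norm with quantization of the output:
// the static versions consume a precomputed `scale`, while the dynamic
// per-token version derives one scale per token (optionally clamped by
// `scale_ub`) and writes the scales it used to `scales` (informal summary).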

void rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                      torch::Tensor& key, int64_t head_size,
                      torch::Tensor& cos_sin_cache, bool is_neox);

void batched_rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                              torch::Tensor& key, int64_t head_size,
                              torch::Tensor& cos_sin_cache, bool is_neox,
                              int64_t rot_dim,
                              torch::Tensor& cos_sin_cache_offsets);
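
// `is_neox` selects the rotary layout: the GPT-NeoX style rotates pairs drawn
// from the first and second halves of the rotary dimension, while the
// non-NeoX (GPT-J) style rotates interleaved even/odd elements (informal
// summary).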

void silu_and_mul(torch::Tensor& out, torch::Tensor& input);

void mul_and_silu(torch::Tensor& out, torch::Tensor& input);

void gelu_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_tanh_and_mul(torch::Tensor& out, torch::Tensor& input);

void fatrelu_and_mul(torch::Tensor& out, torch::Tensor& input,
                     double threshold);

void gelu_new(torch::Tensor& out, torch::Tensor& input);

void gelu_fast(torch::Tensor& out, torch::Tensor& input);

void gelu_quick(torch::Tensor& out, torch::Tensor& input);
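
// The *_and_mul kernels implement gated activations: for an input of shape
// [..., 2 * d] they compute, roughly,
//   out = act(input[..., :d]) * input[..., d:]
// (mul_and_silu applies the activation to the second half instead). For
// example, silu_and_mul is the SwiGLU gate used by LLaMA-style MLPs.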

void advance_step_flashattn(int64_t num_seqs, int64_t num_queries,
                            int64_t block_size, torch::Tensor& input_tokens,
                            torch::Tensor& sampled_token_ids,
                            torch::Tensor& input_positions,
                            torch::Tensor& seq_lens,
                            torch::Tensor& slot_mapping,
                            torch::Tensor& block_tables);

void advance_step_flashinfer(
    int64_t num_seqs, int64_t num_queries, int64_t block_size,
    torch::Tensor& input_tokens, torch::Tensor& sampled_token_ids,
    torch::Tensor& input_positions, torch::Tensor& seq_lens,
    torch::Tensor& slot_mapping, torch::Tensor& block_tables,
    torch::Tensor& paged_kv_indices, torch::Tensor& paged_kv_indptr,
    torch::Tensor& paged_kv_last_page_len, torch::Tensor& block_table_bounds);
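
// The advance_step kernels prepare the next decode step on-device: they
// append the sampled tokens to input_tokens, bump input_positions and
// seq_lens, and refresh slot_mapping (plus the paged-KV metadata for
// FlashInfer), avoiding a CPU round trip between steps (informal summary).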

#ifndef USE_ROCM
torch::Tensor aqlm_gemm(const torch::Tensor& input, const torch::Tensor& codes,
                        const torch::Tensor& codebooks,
                        const torch::Tensor& scales,
                        const std::vector<int64_t>& codebook_partition_sizes,
                        const std::optional<torch::Tensor>& bias);

torch::Tensor aqlm_dequant(
    const torch::Tensor& codes, const torch::Tensor& codebooks,
    const std::vector<int64_t>& codebook_partition_sizes);

torch::Tensor awq_gemm(torch::Tensor _in_feats, torch::Tensor _kernel,
                       torch::Tensor _scaling_factors, torch::Tensor _zeros,
                       int64_t split_k_iters);

torch::Tensor awq_dequantize(torch::Tensor _kernel,
                             torch::Tensor _scaling_factors,
                             torch::Tensor _zeros, int64_t split_k_iters,
                             int64_t thx, int64_t thy);

torch::Tensor permute_cols(torch::Tensor const& A, torch::Tensor const& perm);
#endif

torch::Tensor ggml_dequantize(torch::Tensor W, int64_t type, int64_t m,
                              int64_t n);

torch::Tensor ggml_mul_mat_vec_a8(torch::Tensor W, torch::Tensor X,
                                  int64_t type, int64_t row);

torch::Tensor ggml_mul_mat_a8(torch::Tensor W, torch::Tensor X, int64_t type,
                              int64_t row);

torch::Tensor ggml_moe_a8(torch::Tensor X, torch::Tensor W,
                          torch::Tensor sorted_token_ids,
                          torch::Tensor expert_ids,
                          torch::Tensor num_tokens_post_padded, int64_t type,
                          int64_t row, int64_t top_k, int64_t tokens);

int64_t ggml_moe_get_block_size(int64_t type);
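
// For the ggml kernels, `type` is the GGML quantization format id (e.g. Q4_0,
// Q8_0, or a K-quant); ggml_moe_get_block_size reports the token block size
// the MoE kernel expects for a given format (informal summary).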

#ifndef USE_ROCM
bool cutlass_scaled_mm_supports_fp4(int64_t cuda_device_capability);

bool cutlass_scaled_mm_supports_fp8(int64_t cuda_device_capability);

bool cutlass_scaled_mm_supports_block_fp8(int64_t cuda_device_capability);

void cutlass_scaled_fp4_mm(torch::Tensor& D, torch::Tensor const& A,
                           torch::Tensor const& B, torch::Tensor const& A_sf,
                           torch::Tensor const& B_sf,
                           torch::Tensor const& alpha);

void cutlass_scaled_mm(torch::Tensor& out, torch::Tensor const& a,
                       torch::Tensor const& b, torch::Tensor const& a_scales,
                       torch::Tensor const& b_scales,
                       std::optional<torch::Tensor> const& bias);
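
// Informally, cutlass_scaled_mm computes
//   out = (a_scales * a) @ (b_scales * b) + bias
// with per-tensor or per-row/column scales; the _azp variant below also folds
// in activation zero points for asymmetric int8 inputs (informal summary).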

void cutlass_scaled_mm_azp(torch::Tensor& out, torch::Tensor const& a,
                           torch::Tensor const& b,
                           torch::Tensor const& a_scales,
                           torch::Tensor const& b_scales,
                           torch::Tensor const& azp_adj,
                           std::optional<torch::Tensor> const& azp,
                           std::optional<torch::Tensor> const& bias);

bool cutlass_sparse_scaled_mm_supported(int64_t cuda_device_capability);

void cutlass_scaled_sparse_mm(torch::Tensor& out, torch::Tensor const& a,
                              torch::Tensor const& b, torch::Tensor const& e,
                              torch::Tensor const& a_scales,
                              torch::Tensor const& b_scales,
                              std::optional<torch::Tensor> const& bias);

std::vector<torch::Tensor> cutlass_sparse_compress(torch::Tensor const& a);

void scaled_fp4_quant(torch::Tensor& output, torch::Tensor const& input,
                      torch::Tensor& output_scale,
                      torch::Tensor const& input_scale);
#endif

void static_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor const& scale,
                              std::optional<torch::Tensor> const& azp);

void dynamic_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                               torch::Tensor& scales,
                               std::optional<torch::Tensor> const& azp);
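
// Informally, int8 quantization here is out = round(input / scale), plus a
// zero point when `azp` is given; the static variant takes `scale` as an
// input, while the dynamic variant derives per-token scales and returns them
// through `scales` (informal summary).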

torch::Tensor gptq_gemm(torch::Tensor a, torch::Tensor b_q_weight,
                        torch::Tensor b_gptq_qzeros,
                        torch::Tensor b_gptq_scales, torch::Tensor b_g_idx,
                        bool use_exllama, int64_t bit);

void gptq_shuffle(torch::Tensor q_weight, torch::Tensor q_perm, int64_t bit);

void static_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                             torch::Tensor const& scale);

void dynamic_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor& scale);

void dynamic_per_token_scaled_fp8_quant(
    torch::Tensor& out, torch::Tensor const& input, torch::Tensor& scale,
    std::optional<torch::Tensor> const& scale_ub);
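
// As with int8, fp8 scales are either supplied up front (static) or computed
// on the fly (dynamic); `scale_ub`, when present, clamps the derived
// per-token scales from above (informal summary).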

void selective_scan_fwd(const torch::Tensor& u, const torch::Tensor& delta,
                        const torch::Tensor& A, const torch::Tensor& B,
                        const torch::Tensor& C,
                        const std::optional<torch::Tensor>& D_,
                        const std::optional<torch::Tensor>& z_,
                        const std::optional<torch::Tensor>& delta_bias_,
                        bool delta_softplus,
                        const std::optional<torch::Tensor>& query_start_loc,
                        const std::optional<torch::Tensor>& cache_indices,
                        const std::optional<torch::Tensor>& has_initial_state,
                        const torch::Tensor& ssm_states, int64_t pad_slot_id);
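
// selective_scan_fwd implements the Mamba-style selective SSM scan, roughly:
//   x_t = exp(delta_t * A) * x_{t-1} + delta_t * B_t * u_t
//   y_t = C_t * x_t (+ D * u_t), optionally gated by silu(z)
// where delta_softplus applies softplus to delta first (informal summary).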

void causal_conv1d_update(const at::Tensor& x, const at::Tensor& conv_state,
                          const at::Tensor& weight,
                          const std::optional<at::Tensor>& bias_,
                          bool silu_activation,
                          const std::optional<at::Tensor>& cache_seqlens_,
                          const std::optional<at::Tensor>& conv_state_indices_,
                          int64_t pad_slot_id);

void causal_conv1d_fwd(const at::Tensor& x, const at::Tensor& weight,
                       const std::optional<at::Tensor>& bias_,
                       const std::optional<at::Tensor>& conv_states,
                       const std::optional<at::Tensor>& query_start_loc,
                       const std::optional<at::Tensor>& cache_indices,
                       const std::optional<at::Tensor>& has_initial_state,
                       bool silu_activation, int64_t pad_slot_id);
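
// Custom all-reduce (CUDA only): fptr_t is an opaque handle to the underlying
// communicator object, passed to and from Python as an int64. The expected
// lifecycle is roughly init_custom_ar -> register_buffer -> all_reduce
// (repeatedly) -> dispose (informal summary).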
#ifndef USE_ROCM
using fptr_t = int64_t;
fptr_t init_custom_ar(const std::vector<int64_t>& fake_ipc_ptrs,
                      torch::Tensor& rank_data, int64_t rank, bool full_nvlink);
void all_reduce(fptr_t _fa, torch::Tensor& inp, torch::Tensor& out,
                fptr_t reg_buffer, int64_t reg_buffer_sz_bytes);
void dispose(fptr_t _fa);
int64_t meta_size();
void register_buffer(fptr_t _fa, const std::vector<int64_t>& fake_ipc_ptrs);
std::tuple<std::vector<int64_t>, std::vector<int64_t>>
get_graph_buffer_ipc_meta(fptr_t _fa);
void register_graph_buffers(fptr_t _fa,
                            const std::vector<std::vector<int64_t>>& handles,
                            const std::vector<std::vector<int64_t>>& offsets);
#endif