Avoid multiple redefinition (#1817)
This commit is contained in:
parent 05bdf4eaf3
commit 614856da25
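The headers touched below are pulled into more than one translation unit, and a repeated inclusion re-declares (and, for definitions such as constants or inline functions, redefines) their contents. The fix in this commit is the #pragma once directive added at the top of each header, as the hunks that follow show. The sketch here is only a minimal illustration of that technique; the file name widget.h and the constant kWidgetSize are invented for the example and are not part of the vLLM sources.

// widget.h -- hypothetical header used only for this illustration.
#pragma once                      // guard: any second inclusion becomes a no-op

#include <cstddef>

// Without the guard above, a translation unit that includes widget.h twice
// (for example directly and again through another header) would see this
// definition twice and fail with a "redefinition of 'kWidgetSize'" error.
constexpr std::size_t kWidgetSize = 16;

A classic #ifndef / #define / #endif include guard would have the same effect; #pragma once is simply shorter and is supported by the compilers used to build these extensions (gcc, clang, MSVC, nvcc).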
@@ -1,3 +1,5 @@
+#pragma once
+
 #include <torch/extension.h>
 
 #include <map>
@@ -1,3 +1,5 @@
+#pragma once
+
 #include <torch/extension.h>
 
 int get_device_attribute(
@@ -2,6 +2,8 @@
  * Adapted from
  * https://github.com/pytorch/pytorch/blob/v2.0.1/aten/src/ATen/Dispatch.h
  */
+#pragma once
+
 #include <torch/extension.h>
 
 #define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \
@@ -1,3 +1,5 @@
+#pragma once
+
 #include <torch/extension.h>
 
 void paged_attention_v1(