fix lint

parent cf6ff18246
commit ad50bf4b25
.github/workflows/ruff.yml
@@ -31,4 +31,4 @@ jobs:
         ruff vllm tests
     - name: Spelling check with codespell
       run: |
-        codespell --toml pyproject.toml
+        codespell --toml pyproject.toml
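For reference, the CI step above only invokes two command-line tools. A minimal local reproduction is sketched below; the script is hypothetical (not part of the repository), and it assumes ruff and codespell are installed and that it is run from the repository root where pyproject.toml lives.

# Hypothetical local mirror of the CI lint and spell-check commands shown above.
import subprocess
import sys

def run_checks() -> int:
    # Same invocations as the workflow: ruff over vllm/ and tests/,
    # codespell configured from pyproject.toml.
    commands = [
        ["ruff", "vllm", "tests"],
        ["codespell", "--toml", "pyproject.toml"],
    ]
    for cmd in commands:
        print("running:", " ".join(cmd))
        returncode = subprocess.run(cmd).returncode
        if returncode != 0:
            return returncode  # fail fast, like the CI job
    return 0

if __name__ == "__main__":
    sys.exit(run_checks())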
@@ -36,8 +36,8 @@ def test_contexted_kv_attention(
     torch.cuda.manual_seed(0)
     torch.set_default_device(device)

-    # Need this, otherwise when we capture the graph the process for GPU 1 would run on both
-    # GPU0 and GPU1 and things would hang
+    # Need this, otherwise when we capture the graph the process for GPU 1 would
+    # run on both GPU0 and GPU1 and things would hang
     #
     # see also similar issue: https://github.com/Dao-AILab/flash-attention/issues/523
     torch.cuda.set_device(device)
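The reflowed comment above is only a line-length fix; the point it makes is that a worker meant to drive GPU 1 must call torch.cuda.set_device before doing any CUDA work, otherwise CUDA graph capture can also touch GPU 0 and hang. A minimal sketch of that pattern follows; the function and variable names are illustrative, not taken from the test, and it assumes a machine with at least two GPUs.

import torch

def capture_on_device(device: str = "cuda:1") -> torch.cuda.CUDAGraph:
    # Pin this process to its own GPU before allocating anything, so the
    # capture below never initializes a context on cuda:0 (see the
    # flash-attention issue linked in the diff above).
    torch.cuda.set_device(device)
    x = torch.zeros(4, device=device)

    # Warm up on a side stream before capture, as PyTorch recommends.
    side = torch.cuda.Stream(device=device)
    side.wait_stream(torch.cuda.current_stream(device=device))
    with torch.cuda.stream(side):
        _ = x + 1
    torch.cuda.current_stream(device=device).wait_stream(side)

    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        _ = x + 1
    return graph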