import glob
import os
import runpy
import tempfile

import depyf

from vllm.config import CompilationLevel
# Disable the custom dispatcher and let Dynamo take over all the control.
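# With DYNAMO_AS_IS, vLLM hands the model to torch.compile without its own
# compilation passes, so every graph capture below is a plain Dynamo
# compilation that depyf can observe.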
os.environ['VLLM_TORCH_COMPILE_LEVEL'] = str(CompilationLevel.DYNAMO_AS_IS)
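# depyf decompiles the bytecode that Dynamo generates into readable Python
# files; everything compiled under prepare_debug() is dumped into temp_dir.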
temp_dir = tempfile.mkdtemp()
with depyf.prepare_debug(temp_dir):
    cur_dir = os.path.dirname(__file__)
    parent_dir = os.path.dirname(cur_dir)
    root_dir = os.path.dirname(parent_dir)
    example_file = os.path.join(root_dir, "examples",
                                "offline_inference_tpu.py")
    runpy.run_path(example_file)

compiled_code = sorted(
    glob.glob(os.path.join(temp_dir, "__transformed_code*.py")))
# We should only trigger Dynamo compilation three times:
# one for the profiling phase without kv cache,
# one for the prefill phase with symbolic shapes,
# one for the decode phase with symbolic shapes,
# and later calls should not trigger Dynamo compilation again.
# NOTE: later calls might still trigger XLA compilation.
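# (Dynamo recompiles only when its guards fail, while XLA recompiles
# whenever it sees new tensor shapes, so the XLA compile count can differ.)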
# Check that we have exactly three compiled code files;
# this is the assumption when we use the custom dispatcher.
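# Each __transformed_code*.py file dumped by depyf corresponds to one Dynamo
# compilation, so counting the files counts the compilations.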
assert len(compiled_code) == 3
# Check that all the compilations are as expected.
compiled_fn = sorted(
    glob.glob(os.path.join(temp_dir, "__compiled_fn*Captured*.py")))
# The first compilation is the profiling phase;
# it should not have any kv cache.
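# (The profiling run only measures memory usage, and it executes before the
# kv cache is allocated, so no kv_caches argument appears in its graph.)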
with open(compiled_fn[0]) as f:
    content = f.read()
    assert "kv_caches" not in content
# The second compilation is the prefill phase;
# it should have the kv cache and the flash_attention op.
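# (Prefill attends over the whole prompt at once; on TPU vLLM lowers this to
# torch.ops.xla.flash_attention.)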
with open(compiled_fn[1]) as f:
    content = f.read()
    assert ("kv_caches" in content
            and "torch.ops.xla.flash_attention" in content)
# The third compilation is the decode phase;
# it should have the kv cache and the paged_attention op.
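# (Decode generates one token at a time and reads earlier tokens from the
# paged kv cache; on TPU vLLM lowers this to torch.ops.xla.paged_attention.)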
with open(compiled_fn[2]) as f:
    content = f.read()
    assert ("kv_caches" in content
            and "torch.ops.xla.paged_attention" in content)