[TPU] Set per-rank XLA cache (#7533)

Author: Woosuk Kwon
Date: 2024-08-14 14:47:51 -07:00
Committed by: GitHub (GPG Key ID: B5690EEEBB952194, no known key found for this signature in database)
Parent: 2ecf7b1757
Commit: 951fdd66d3


@@ -102,12 +102,12 @@ class TPUWorker(LoraNotSupportedWorkerBase, LocalOrDistributedWorkerBase):
         # 30-40 graphs for decode. 128 is an arbitrary safe number.
         torch._dynamo.config.cache_size_limit = 128
         # Use persistent cache to avoid XLA recompilation.
         # NOTE(woosuk): This does not completely eliminate the recompilation
         # overhead because dynamo does not cache the compiled results.
-        # NOTE(woosuk): Set readonly=False only for the rank 0 process to avoid
-        # race conditions.
-        xr.initialize_cache(envs.VLLM_XLA_CACHE_PATH,
-                            readonly=not self.is_driver_worker)
+        # NOTE(woosuk): Set per-rank cache path since different ranks
+        # can have slightly different XLA graphs.
+        world_size = self.parallel_config.world_size
+        per_rank_path = os.path.join(envs.VLLM_XLA_CACHE_PATH,
+                                     f"tp{world_size}_rank{self.rank}")
+        xr.initialize_cache(per_rank_path, readonly=False)

     def load_model(self):
         self.model_runner.load_model()
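
For context, a minimal standalone sketch of the same idea outside vLLM: each rank derives its own cache directory before calling torch_xla's persistent-cache API, so every rank can write (readonly=False) without racing other processes. The cache root, the helper name, and the use of xr.world_size()/xr.global_ordinal() in place of vLLM's parallel_config and self.rank are assumptions for illustration, not the actual vLLM implementation.

# Sketch with assumed names (not vLLM code): per-rank XLA persistent
# compilation cache, using torch_xla runtime helpers for rank/world size.
import os

import torch_xla.runtime as xr

# Hypothetical cache root; vLLM reads its own path from envs.VLLM_XLA_CACHE_PATH.
XLA_CACHE_ROOT = os.path.expanduser("~/.cache/xla_compile_cache")


def init_per_rank_xla_cache() -> None:
    # A distinct directory per (world size, rank) so ranks with slightly
    # different XLA graphs do not collide and concurrent writers never
    # touch the same cache files.
    world_size = xr.world_size()
    rank = xr.global_ordinal()
    per_rank_path = os.path.join(XLA_CACHE_ROOT, f"tp{world_size}_rank{rank}")
    os.makedirs(per_rank_path, exist_ok=True)
    xr.initialize_cache(per_rank_path, readonly=False)

This mirrors the design choice in the diff above: rather than serializing writers by making all non-driver ranks read-only, each rank gets its own cache namespace keyed by world size and rank.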