Update default max_num_batched_tokens for chunked prefill to 2048 (#10544)

Michael Goin 2024-11-23 00:14:19 -05:00 committed by GitHub
parent cfea9c04ef
commit 02a43f82a9


@@ -1133,9 +1133,9 @@ class SchedulerConfig:
                     # max_num_batched_tokens.
                     self.max_num_batched_tokens = max(self.max_model_len, 2048)
                 else:
-                    # It is the values that have the best balance between ITL
-                    # and TTFT on A100. Note it is not optimized for throughput.
-                    self.max_num_batched_tokens = 512
+                    # This value is chosen to have a balance between ITL
+                    # and TTFT. Note it is not optimized for throughput.
+                    self.max_num_batched_tokens = 2048
             else:
                 # If max_model_len is too short, use 2048 as the default value
                 # for higher throughput.
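
For context, a minimal sketch of how a user might override this new default through vLLM's offline LLM entry point, which forwards enable_chunked_prefill and max_num_batched_tokens to the engine arguments (the model name and the 4096 budget below are illustrative, not from the commit):

from vllm import LLM, SamplingParams

# With chunked prefill enabled and no explicit budget, the scheduler
# now defaults max_num_batched_tokens to 2048 (previously 512).
llm = LLM(
    model="facebook/opt-125m",    # illustrative model choice
    enable_chunked_prefill=True,
    max_num_batched_tokens=4096,  # raise the per-step token budget
)

outputs = llm.generate(["Hello, my name is"],
                       SamplingParams(max_tokens=16))
print(outputs[0].outputs[0].text)

A larger per-step token budget lets more prefill tokens be scheduled in each iteration, which tends to help TTFT and throughput at some cost to ITL; the new 2048 default is the balance point the in-code comment describes, explicitly not tuned for peak throughput.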