From 72d3a30c6327e70de3595d00f04e2d577fcbbb68 Mon Sep 17 00:00:00 2001
From: Woosuk Kwon
Date: Mon, 5 Feb 2024 12:45:37 -0800
Subject: [PATCH] [Minor] Fix benchmark_latency script (#2765)

---
 benchmarks/benchmark_latency.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/benchmarks/benchmark_latency.py b/benchmarks/benchmark_latency.py
index 2eb9e2cb..6e3b679c 100644
--- a/benchmarks/benchmark_latency.py
+++ b/benchmarks/benchmark_latency.py
@@ -37,7 +37,10 @@ def main(args: argparse.Namespace):
         max_tokens=args.output_len,
     )
     print(sampling_params)
-    dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size
+    dummy_prompt_token_ids = np.random.randint(10000,
+                                               size=(args.batch_size,
+                                                     args.input_len))
+    dummy_prompt_token_ids = dummy_prompt_token_ids.tolist()
 
     def run_to_completion(profile_dir: Optional[str] = None):
         if profile_dir:
@@ -71,7 +74,7 @@ def main(args: argparse.Namespace):
                 "."
             ) / "vllm_benchmark_result" / f"latency_result_{time.time()}"
         print(f"Profiling (results will be saved to '{profile_dir}')...")
-        run_to_completion(profile_dir=args.profile_result_dir)
+        run_to_completion(profile_dir=profile_dir)
         return
 
     # Benchmark.
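
Note (not part of the patch): a minimal sketch of the dummy-prompt change
above, with batch_size and input_len standing in as hypothetical values for
args.batch_size and args.input_len. The old code repeated a single all-zeros
prompt for the whole batch; the new code draws independent random token IDs
below 10000 for each prompt.

    # Sketch only -- batch_size/input_len are assumed stand-ins, not script args.
    import numpy as np

    batch_size, input_len = 8, 32

    # Before: one all-zeros prompt, repeated (by reference) batch_size times.
    old_prompts = [[0] * input_len] * batch_size

    # After: each prompt is an independent list of random token IDs in [0, 10000).
    new_prompts = np.random.randint(10000,
                                    size=(batch_size, input_len)).tolist()

    assert len(new_prompts) == batch_size
    assert all(len(p) == input_len for p in new_prompts)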