"""Benchmark the latency of processing a single batch of requests."""
import argparse
import time

import numpy as np
import torch
from tqdm import tqdm

from vllm import LLM, SamplingParams


def main(args: argparse.Namespace):
    print(args)

    # Process all the requests in a single batch if possible.
    # NOTE(woosuk): If the request cannot be processed in a single batch,
    # the engine will automatically process the request in multiple batches.
    llm = LLM(
        model=args.model,
        tokenizer=args.tokenizer,
        quantization=args.quantization,
        tensor_parallel_size=args.tensor_parallel_size,
        max_num_seqs=args.batch_size,
        max_num_batched_tokens=args.batch_size * args.input_len,
        trust_remote_code=args.trust_remote_code,
        dtype=args.dtype,
    )
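    # Sizing note: max_num_seqs equals the benchmark batch size and
    # max_num_batched_tokens covers every prompt token in the batch, so the
    # scheduler should be able to admit the entire batch in a single step.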

    sampling_params = SamplingParams(
        n=args.n,
        temperature=0.0 if args.use_beam_search else 1.0,
        top_p=1.0,
        use_beam_search=args.use_beam_search,
        ignore_eos=True,
        max_tokens=args.output_len,
    )
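    # ignore_eos=True makes every sequence run to max_tokens regardless of
    # EOS, so each iteration generates exactly args.output_len tokens per
    # sequence and latencies are comparable across runs.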
    print(sampling_params)
    dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size
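    # Dummy prompts of repeated token 0: the token values should not affect
    # measured latency, since the work depends only on sequence lengths and
    # ignore_eos fixes the generation length.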

    def run_to_completion(profile: bool = False):
        if profile:
            # cudaProfilerStart/Stop only mark a capture range for an
            # external profiler attached to the process (for example, running
            # under `nsys profile --capture-range=cudaProfilerApi`); by
            # themselves they do not record anything.
            torch.cuda.cudart().cudaProfilerStart()
        start_time = time.perf_counter()

        llm.generate(prompt_token_ids=dummy_prompt_token_ids,
                     sampling_params=sampling_params,
                     use_tqdm=False)

        end_time = time.perf_counter()
        latency = end_time - start_time
        if profile:
            torch.cuda.cudart().cudaProfilerStop()
        return latency

    print("Warming up...")
    run_to_completion(profile=False)
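    # The warmup run absorbs one-time startup costs (e.g. CUDA context
    # creation and allocator growth) so they stay out of the measured
    # iterations.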

    # Benchmark.
    latencies = []
    for _ in tqdm(range(args.num_iters), desc="Profiling iterations"):
        latencies.append(run_to_completion(profile=False))
    print(f'Avg latency: {np.mean(latencies)} seconds')
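    # A possible extension (not part of the original script): with a larger
    # --num-iters, percentiles can be more informative than the mean, e.g.
    #   print(f'P90 latency: {np.percentile(latencies, 90)} seconds')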


if __name__ == '__main__':
|
2023-04-30 15:42:17 +08:00
|
|
|
parser = argparse.ArgumentParser(
|
2023-05-22 17:03:40 -07:00
|
|
|
description='Benchmark the latency of processing a single batch of '
|
2023-09-16 00:03:37 -07:00
|
|
|
'requests till completion.')
    parser.add_argument('--model', type=str, default='facebook/opt-125m')
    parser.add_argument('--tokenizer', type=str, default=None)
    parser.add_argument('--quantization',
                        '-q',
                        choices=['awq', 'squeezellm', None],
                        default=None)
    parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1)
    parser.add_argument('--input-len', type=int, default=32)
    parser.add_argument('--output-len', type=int, default=128)
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--n',
                        type=int,
                        default=1,
                        help='Number of generated sequences per prompt.')
    parser.add_argument('--use-beam-search', action='store_true')
    parser.add_argument('--num-iters',
                        type=int,
                        default=3,
                        help='Number of iterations to run.')
    parser.add_argument('--trust-remote-code',
                        action='store_true',
                        help='Trust remote code from Hugging Face Hub.')
    parser.add_argument(
        '--dtype',
        type=str,
        default='auto',
        choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
        help='Data type for model weights and activations. '
        'The "auto" option will use FP16 precision '
        'for FP32 and FP16 models, and BF16 precision '
        'for BF16 models.')
    args = parser.parse_args()
    main(args)