"""Benchmark the latency of processing a single batch of requests."""
import argparse
import json
import time
from pathlib import Path
from typing import List, Optional

import numpy as np
import torch
from tqdm import tqdm

from vllm import LLM, SamplingParams
from vllm.engine.arg_utils import EngineArgs
from vllm.inputs import PromptStrictInputs
from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS

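# Example invocation (illustrative only; the script filename is assumed, and
# all flags used here are defined in the argument parser below):
#   python benchmark_latency.py --model facebook/opt-125m \
#       --input-len 32 --output-len 128 --batch-size 8 --num-iters 30
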
def main(args: argparse.Namespace):
    print(args)

    # NOTE(woosuk): If the request cannot be processed in a single batch,
    # the engine will automatically process the request in multiple batches.
    llm = LLM(model=args.model,
              speculative_model=args.speculative_model,
              num_speculative_tokens=args.num_speculative_tokens,
              tokenizer=args.tokenizer,
              quantization=args.quantization,
              tensor_parallel_size=args.tensor_parallel_size,
              trust_remote_code=args.trust_remote_code,
              dtype=args.dtype,
              enforce_eager=args.enforce_eager,
              kv_cache_dtype=args.kv_cache_dtype,
              quantization_param_path=args.quantization_param_path,
              device=args.device,
              ray_workers_use_nsight=args.ray_workers_use_nsight,
              use_v2_block_manager=args.use_v2_block_manager,
              enable_chunked_prefill=args.enable_chunked_prefill,
              download_dir=args.download_dir,
              block_size=args.block_size,
              gpu_memory_utilization=args.gpu_memory_utilization,
              load_format=args.load_format,
              distributed_executor_backend=args.distributed_executor_backend)

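    # Fixed-length generation: EOS is ignored and max_tokens is pinned to
    # --output-len, so every request produces exactly the requested number of
    # output tokens. Beam search uses temperature 0.0; sampling uses 1.0.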
    sampling_params = SamplingParams(
        n=args.n,
        temperature=0.0 if args.use_beam_search else 1.0,
        top_p=1.0,
        use_beam_search=args.use_beam_search,
        ignore_eos=True,
        max_tokens=args.output_len,
    )
    print(sampling_params)
    dummy_prompt_token_ids = np.random.randint(10000,
                                               size=(args.batch_size,
                                                     args.input_len))
    dummy_inputs: List[PromptStrictInputs] = [{
        "prompt_token_ids": batch
    } for batch in dummy_prompt_token_ids.tolist()]

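    # Run one full generate() call over the dummy batch. When profile_dir is
    # set, a torch profiler trace is written instead of returning a latency.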
    def run_to_completion(profile_dir: Optional[str] = None):
        if profile_dir:
            with torch.profiler.profile(
                    activities=[
                        torch.profiler.ProfilerActivity.CPU,
                        torch.profiler.ProfilerActivity.CUDA,
                    ],
                    on_trace_ready=torch.profiler.tensorboard_trace_handler(
                        str(profile_dir))) as p:
                llm.generate(dummy_inputs,
                             sampling_params=sampling_params,
                             use_tqdm=False)
            print(p.key_averages())
        else:
            start_time = time.perf_counter()
            llm.generate(dummy_inputs,
                         sampling_params=sampling_params,
                         use_tqdm=False)
            end_time = time.perf_counter()
            latency = end_time - start_time
            return latency

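    # Warmup iterations are not timed; they absorb one-time startup costs
    # (e.g. memory allocation and kernel compilation) before measurement.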
print("Warming up...")
|
2024-04-06 14:32:30 -07:00
|
|
|
for _ in tqdm(range(args.num_iters_warmup), desc="Warmup iterations"):
|
|
|
|
run_to_completion(profile_dir=None)
|
2023-04-01 00:51:08 +08:00
|
|
|
|
2023-11-29 23:42:52 -08:00
|
|
|
    if args.profile:
        profile_dir = args.profile_result_dir
        if not profile_dir:
            profile_dir = Path(
                "."
            ) / "vllm_benchmark_result" / f"latency_result_{time.time()}"
        print(f"Profiling (results will be saved to '{profile_dir}')...")
        run_to_completion(profile_dir=profile_dir)
        return

    # Benchmark.
    latencies = []
    for _ in tqdm(range(args.num_iters), desc="Profiling iterations"):
        latencies.append(run_to_completion(profile_dir=None))
    latencies = np.array(latencies)
    percentages = [10, 25, 50, 75, 90]
    percentiles = np.percentile(latencies, percentages)
    print(f'Avg latency: {np.mean(latencies)} seconds')
    for percentage, percentile in zip(percentages, percentiles):
        print(f'{percentage}% percentile latency: {percentile} seconds')

    # Output JSON results if specified.
    if args.output_json:
        results = {
            "avg_latency": np.mean(latencies),
            "latencies": latencies.tolist(),
            "percentiles": dict(zip(percentages, percentiles.tolist())),
        }
        with open(args.output_json, "w") as f:
            json.dump(results, f, indent=4)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Benchmark the latency of processing a single batch of '
        'requests till completion.')
    parser.add_argument('--model', type=str, default='facebook/opt-125m')
    parser.add_argument('--speculative-model', type=str, default=None)
    parser.add_argument('--num-speculative-tokens', type=int, default=None)
    parser.add_argument('--tokenizer', type=str, default=None)
    parser.add_argument('--quantization',
                        '-q',
                        choices=[*QUANTIZATION_METHODS, None],
                        default=None)
    parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1)
    parser.add_argument('--input-len', type=int, default=32)
    parser.add_argument('--output-len', type=int, default=128)
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--n',
                        type=int,
                        default=1,
                        help='Number of generated sequences per prompt.')
    parser.add_argument('--use-beam-search', action='store_true')
    parser.add_argument('--num-iters-warmup',
                        type=int,
                        default=10,
                        help='Number of iterations to run for warmup.')
    parser.add_argument('--num-iters',
                        type=int,
                        default=30,
                        help='Number of iterations to run.')
    parser.add_argument('--trust-remote-code',
                        action='store_true',
                        help='Trust remote code from Hugging Face.')
    parser.add_argument(
        '--dtype',
        type=str,
        default='auto',
        choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
        help='Data type for model weights and activations. '
        'The "auto" option will use FP16 precision '
        'for FP32 and FP16 models, and BF16 precision '
        'for BF16 models.')
    parser.add_argument('--enforce-eager',
                        action='store_true',
                        help='Enforce eager mode and disable CUDA graphs.')
    parser.add_argument(
        '--kv-cache-dtype',
        type=str,
        choices=['auto', 'fp8', 'fp8_e5m2', 'fp8_e4m3'],
        default='auto',
        help='Data type for KV cache storage. If "auto", will use the model '
        'data type. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. '
        'ROCm (AMD GPU) supports fp8 (=fp8_e4m3).')
    parser.add_argument(
        '--quantization-param-path',
        type=str,
        default=None,
        help='Path to the JSON file containing the KV cache scaling factors. '
        'This should generally be supplied when the KV cache dtype is FP8. '
        'Otherwise, KV cache scaling factors default to 1.0, which may cause '
        'accuracy issues. FP8_E5M2 (without scaling) is only supported on '
        'CUDA versions greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is '
        'instead supported for common inference criteria.')
    parser.add_argument(
        '--profile',
        action='store_true',
        help='Profile the generation process of a single batch.')
    parser.add_argument(
        '--profile-result-dir',
        type=str,
        default=None,
        help='Path to save the PyTorch profiler output. Can be visualized '
        'with ui.perfetto.dev or TensorBoard.')
    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        choices=['cuda', 'cpu', 'tpu', 'xpu'],
        help='Device type for vLLM execution.')
    parser.add_argument('--block-size',
                        type=int,
                        default=16,
                        help='Block size of the key/value cache.')
    parser.add_argument(
        '--enable-chunked-prefill',
        action='store_true',
        help='If set, prefill requests can be chunked based on '
        'max_num_batched_tokens.')
    parser.add_argument('--use-v2-block-manager', action='store_true')
    parser.add_argument(
        '--ray-workers-use-nsight',
        action='store_true',
        help='If specified, use nsight to profile Ray workers.')
    parser.add_argument('--download-dir',
                        type=str,
                        default=None,
                        help='Directory to download and load the weights; '
                        'defaults to the default Hugging Face cache dir.')
    parser.add_argument(
        '--output-json',
        type=str,
        default=None,
        help='Path to save the latency results in JSON format.')
    parser.add_argument('--gpu-memory-utilization',
                        type=float,
                        default=0.9,
                        help='The fraction of GPU memory to be used for '
                        'the model executor, which can range from 0 to 1. '
                        'If unspecified, will use the default value of 0.9.')
    parser.add_argument(
        '--load-format',
        type=str,
        default=EngineArgs.load_format,
        choices=[
            'auto', 'pt', 'safetensors', 'npcache', 'dummy', 'tensorizer',
            'bitsandbytes'
        ],
        help='The format of the model weights to load.\n\n'
        '* "auto" will try to load the weights in the safetensors format '
        'and fall back to the pytorch bin format if safetensors format '
        'is not available.\n'
        '* "pt" will load the weights in the pytorch bin format.\n'
        '* "safetensors" will load the weights in the safetensors format.\n'
        '* "npcache" will load the weights in pytorch format and store '
        'a numpy cache to speed up the loading.\n'
        '* "dummy" will initialize the weights with random values, '
        'which is mainly for profiling.\n'
        '* "tensorizer" will load the weights using tensorizer from '
        'CoreWeave. See the Tensorize vLLM Model script in the Examples '
        'section for more information.\n'
        '* "bitsandbytes" will load the weights using bitsandbytes '
        'quantization.\n')
    parser.add_argument(
        '--distributed-executor-backend',
        choices=['ray', 'mp'],
        default=None,
        help='Backend to use for distributed serving. When more than 1 GPU '
        'is used, will be automatically set to "ray" if installed '
        'or "mp" (multiprocessing) otherwise.')
    args = parser.parse_args()
    main(args)