diff --git a/.buildkite/nightly-benchmarks/README.md b/.buildkite/nightly-benchmarks/README.md index 4036b32a..c84e1509 100644 --- a/.buildkite/nightly-benchmarks/README.md +++ b/.buildkite/nightly-benchmarks/README.md @@ -1,5 +1,6 @@ # vLLM benchmark suite + ## Introduction This directory contains the performance benchmarking CI for vllm. diff --git a/.buildkite/nightly-benchmarks/kickoff-pipeline.sh b/.buildkite/nightly-benchmarks/kickoff-pipeline.sh deleted file mode 100755 index 15d411fe..00000000 --- a/.buildkite/nightly-benchmarks/kickoff-pipeline.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# NOTE(simon): this script runs inside a buildkite agent with CPU only access. -set -euo pipefail - -# Install system packages -apt update -apt install -y curl jq - -# Install minijinja for templating -curl -sSfL https://github.com/mitsuhiko/minijinja/releases/latest/download/minijinja-cli-installer.sh | sh -source $HOME/.cargo/env - -# If BUILDKITE_PULL_REQUEST != "false", then we check the PR labels using curl and jq -if [ "$BUILDKITE_PULL_REQUEST" != "false" ]; then - PR_LABELS=$(curl -s "https://api.github.com/repos/vllm-project/vllm/pulls/$BUILDKITE_PULL_REQUEST" | jq -r '.labels[].name') - - if [[ $PR_LABELS == *"perf-benchmarks"* ]]; then - echo "This PR has the 'perf-benchmarks' label. Proceeding with the nightly benchmarks." - else - echo "This PR does not have the 'perf-benchmarks' label. Skipping the nightly benchmarks." - exit 0 - fi -fi - -# Upload sample.yaml -buildkite-agent pipeline upload .buildkite/nightly-benchmarks/benchmark-pipeline.yaml diff --git a/.buildkite/nightly-benchmarks/nightly-descriptions.md b/.buildkite/nightly-benchmarks/nightly-descriptions.md new file mode 100644 index 00000000..c3d3cbf4 --- /dev/null +++ b/.buildkite/nightly-benchmarks/nightly-descriptions.md @@ -0,0 +1,45 @@ + +# Nightly benchmark + +The main goal of this benchmarking is two-fold: +- Performance clarity: Provide clarity on which one (vllm, tensorrt-llm, lmdeploy and tgi) leads in performance in what workload. +- Reproducible: one can run the exact same set of benchmarking commands inside the exact same docker by following reproducing instructions in [reproduce.md](). + + +## Docker images + +We benchmark vllm, tensorrt-llm, lmdeploy and tgi using the following docker images: +- vllm/vllm-openai:v0.5.0.post1 +- nvcr.io/nvidia/tritonserver:24.04-trtllm-python-py3 +- openmmlab/lmdeploy:v0.5.0 +- ghcr.io/huggingface/text-generation-inference:2.1 + + + + +## Hardware + +One AWS node with 8x NVIDIA A100 GPUs. + + +## Workload description + +We benchmark vllm, tensorrt-llm, lmdeploy and tgi using the following workload: + +- Input length: randomly sample 500 prompts from ShareGPT dataset (with fixed random seed). +- Output length: the corresponding output length of these 500 prompts. +- Models: llama-3 8B, llama-3 70B, mixtral 8x7B. +- Average QPS (query per second): 4 for the small model (llama-3 8B) and 2 for other two models. For each QPS, the arrival time of each query is determined using a random Poisson process (with fixed random seed). +- Evaluation metrics: Throughput (higher the better), TTFT (time to the first token, lower the better), ITL (inter-token latency, lower the better). + + + +## Plots + +In the following plots, the dot shows the mean and the error bar shows the standard error of the mean. Value 0 means that the corresponding benchmark crashed. 
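For readers who want to recompute the error bars offline, here is a minimal sketch using the column names emitted by `scripts/summary-nightly-results.py` (the numbers below are placeholders, not real measurements):

```python
import math

# One row of the aggregated nightly results table (placeholder values).
record = {
    "Mean TTFT (ms)": 120.0,
    "Std TTFT (ms)": 45.0,
    "Successful req.": 500,
}

# Error bar = standard error of the mean: per-run std / sqrt(successful requests),
# matching the computation in scripts/plot-nightly-results.py.
sem = record["Std TTFT (ms)"] / math.sqrt(record["Successful req."])
print(f"TTFT: {record['Mean TTFT (ms)']:.1f} ms +/- {sem:.2f} ms")
```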
+ +Benchmarking results + +## Results + +{nightly_results_benchmarking_table} diff --git a/.buildkite/nightly-benchmarks/nightly-pipeline.yaml b/.buildkite/nightly-benchmarks/nightly-pipeline.yaml new file mode 100644 index 00000000..6e399bb9 --- /dev/null +++ b/.buildkite/nightly-benchmarks/nightly-pipeline.yaml @@ -0,0 +1,120 @@ +common_pod_spec: &common_pod_spec + priorityClassName: perf-benchmark + nodeSelector: + nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB + volumes: + - name: devshm + emptyDir: + medium: Memory + - name: hf-cache + hostPath: + path: /root/.cache/huggingface + type: Directory + +common_container_settings: &common_container_settings + command: + - bash .buildkite/nightly-benchmarks/run-nightly-suite.sh + resources: + limits: + nvidia.com/gpu: 8 + volumeMounts: + - name: devshm + mountPath: /dev/shm + - name: hf-cache + mountPath: /root/.cache/huggingface + env: + - name: VLLM_USAGE_SOURCE + value: ci-test + - name: HF_HOME + value: /root/.cache/huggingface + - name: VLLM_SOURCE_CODE_LOC + value: /workspace/build/buildkite/vllm/performance-benchmark + - name: HF_TOKEN + valueFrom: + secretKeyRef: + name: hf-token-secret + key: token + +steps: + - block: ":rocket: Ready for comparing vllm against alternatives? This will take 4 hours." + - label: "A100 trt benchmark" + priority: 100 + agents: + queue: A100 + plugins: + - kubernetes: + podSpec: + <<: *common_pod_spec + containers: + - image: nvcr.io/nvidia/tritonserver:24.04-trtllm-python-py3 + <<: *common_container_settings + + - label: "A100 lmdeploy benchmark" + priority: 100 + agents: + queue: A100 + plugins: + - kubernetes: + podSpec: + <<: *common_pod_spec + containers: + - image: openmmlab/lmdeploy:v0.5.0 + <<: *common_container_settings + + + - label: "A100 vllm benchmark" + priority: 100 + agents: + queue: A100 + plugins: + - kubernetes: + podSpec: + <<: *common_pod_spec + containers: + - image: vllm/vllm-openai:latest + <<: *common_container_settings + + - label: "A100 tgi benchmark" + priority: 100 + agents: + queue: A100 + plugins: + - kubernetes: + podSpec: + <<: *common_pod_spec + containers: + - image: ghcr.io/huggingface/text-generation-inference:2.1 + <<: *common_container_settings + + - wait + + - label: "Plot" + priority: 100 + agents: + queue: A100 + plugins: + - kubernetes: + podSpec: + <<: *common_pod_spec + containers: + - image: vllm/vllm-openai:v0.5.0.post1 + command: + - bash .buildkite/nightly-benchmarks/scripts/nightly-annotate.sh + resources: + limits: + nvidia.com/gpu: 8 + volumeMounts: + - name: devshm + mountPath: /dev/shm + env: + - name: VLLM_USAGE_SOURCE + value: ci-test + - name: VLLM_SOURCE_CODE_LOC + value: /workspace/build/buildkite/vllm/performance-benchmark + - name: HF_TOKEN + valueFrom: + secretKeyRef: + name: hf-token-secret + key: token + + - wait \ No newline at end of file diff --git a/.buildkite/nightly-benchmarks/run-nightly-suite.sh b/.buildkite/nightly-benchmarks/run-nightly-suite.sh new file mode 100644 index 00000000..627a3e69 --- /dev/null +++ b/.buildkite/nightly-benchmarks/run-nightly-suite.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +set -o pipefail +set -x + +check_gpus() { + # check the number of GPUs and GPU type. + declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l) + if [[ $gpu_count -gt 0 ]]; then + echo "GPU found." + else + echo "Need at least 1 GPU to run benchmarking." 
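+        # Without any GPU visible to nvidia-smi there is nothing to benchmark, so abort the whole suite here.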
+ exit 1 + fi + declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}') + echo "GPU type is $gpu_type" +} + +check_hf_token() { + # check if HF_TOKEN is available and valid + if [[ -z "$HF_TOKEN" ]]; then + echo "Error: HF_TOKEN is not set." + exit 1 + elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then + echo "Error: HF_TOKEN does not start with 'hf_'." + exit 1 + else + echo "HF_TOKEN is set and valid." + fi +} + +main() { + + check_gpus + check_hf_token + + df -h + + (which wget && which curl) || (apt-get update && apt-get install -y wget curl) + (which jq) || (apt-get update && apt-get -y install jq) + + cd $VLLM_SOURCE_CODE_LOC/benchmarks + wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json + + + # run lmdeploy + if which lmdeploy >/dev/null; then + echo "lmdeploy is available, redirect to run-lmdeploy-nightly.sh" + bash ../.buildkite/nightly-benchmarks/scripts/run-lmdeploy-nightly.sh + exit 0 + fi + + # run tgi + if [ -e /tgi-entrypoint.sh ]; then + echo "tgi is available, redirect to run-tgi-nightly.sh" + bash ../.buildkite/nightly-benchmarks/scripts/run-tgi-nightly.sh + exit 0 + fi + + # run trt + if which trtllm-build >/dev/null; then + echo "trtllm is available, redirect to run-trt-nightly.sh" + bash ../.buildkite/nightly-benchmarks/scripts/run-trt-nightly.sh + exit 0 + fi + + # run vllm + if [ -e /vllm-workspace ]; then + echo "vllm is available, redirect to run-vllm-nightly.sh" + bash ../.buildkite/nightly-benchmarks/scripts/run-vllm-nightly.sh + exit 0 + fi + +} + +main "$@" \ No newline at end of file diff --git a/.buildkite/nightly-benchmarks/scripts/download-tokenizer.py b/.buildkite/nightly-benchmarks/scripts/download-tokenizer.py new file mode 100644 index 00000000..68ac5909 --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/download-tokenizer.py @@ -0,0 +1,26 @@ +import argparse + +from transformers import AutoTokenizer + + +def main(model, cachedir): + # Load the tokenizer and save it to the specified directory + tokenizer = AutoTokenizer.from_pretrained(model) + tokenizer.save_pretrained(cachedir) + print(f"Tokenizer saved to {cachedir}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Download and save Hugging Face tokenizer") + parser.add_argument("--model", + type=str, + required=True, + help="Name of the model") + parser.add_argument("--cachedir", + type=str, + required=True, + help="Directory to save the tokenizer") + + args = parser.parse_args() + main(args.model, args.cachedir) diff --git a/.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py b/.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py new file mode 100644 index 00000000..18bcc3a8 --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py @@ -0,0 +1,6 @@ +from lmdeploy.serve.openai.api_client import APIClient + +api_client = APIClient("http://localhost:8000") +model_name = api_client.available_models[0] + +print(model_name) diff --git a/.buildkite/nightly-benchmarks/scripts/launch-trt-server.sh b/.buildkite/nightly-benchmarks/scripts/launch-trt-server.sh new file mode 100644 index 00000000..f8262653 --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/launch-trt-server.sh @@ -0,0 +1,102 @@ +#!/bin/bash + + +server_params=$1 +common_params=$2 + + + +model_path=$(echo "$common_params" | jq -r '.model') +model_name="${model_path#*/}" +model_type=$(echo "$server_params" | jq -r '.model_type') 
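+# ($1 is the 'trt_server_parameters' block and $2 the 'common_parameters' block from tests/nightly-tests.json; see run-trt-nightly.sh, which invokes this script.)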
+model_dtype=$(echo "$server_params" | jq -r '.model_dtype') +model_tp_size=$(echo "$common_params" | jq -r '.tp') +max_batch_size=$(echo "$server_params" | jq -r '.max_batch_size') +max_input_len=$(echo "$server_params" | jq -r '.max_input_len') +max_output_len=$(echo "$server_params" | jq -r '.max_output_len') +trt_llm_version=$(echo "$server_params" | jq -r '.trt_llm_version') + +cd ~ +rm -rf models +mkdir -p models +cd models +models_dir=$(pwd) +trt_model_path=${models_dir}/${model_name}-trt-ckpt +trt_engine_path=${models_dir}/${model_name}-trt-engine + +cd ~ +rm -rf tensorrt-demo +git clone https://github.com/neuralmagic/tensorrt-demo.git +cd tensorrt-demo +tensorrt_demo_dir=$(pwd) + +# make sure the parameter inside tensorrt_demo is consistent to envvar +sed -i.bak "/key: \"tokenizer_dir\"/,/string_value:/s|string_value: \".*\"|string_value: \"$model_path\"|" ./triton_model_repo/postprocessing/config.pbtxt +sed -i.bak "/key: \"tokenizer_dir\"/,/string_value:/s|string_value: \".*\"|string_value: \"$model_path\"|" ./triton_model_repo/preprocessing/config.pbtxt +sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/ensemble/config.pbtxt +sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/preprocessing/config.pbtxt +sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/postprocessing/config.pbtxt +sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/tensorrt_llm_bls/config.pbtxt + + +cd / +rm -rf tensorrtllm_backend +git clone https://github.com/triton-inference-server/tensorrtllm_backend.git +git lfs install +cd tensorrtllm_backend +git checkout $trt_llm_version +tensorrtllm_backend_dir=$(pwd) +git submodule update --init --recursive +cp -r ${tensorrt_demo_dir}/triton_model_repo ${tensorrtllm_backend_dir}/ + +cd /tensorrtllm_backend +cd ./tensorrt_llm/examples/${model_type} + + +if echo "$common_params" | jq -e 'has("fp8")' > /dev/null; then + + echo "Key 'fp8' exists in common params. Use quantize.py instead of convert_checkpoint.py" + echo "Reference: https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/llama/README.md" + python ../quantization/quantize.py \ + --model_dir ${model_path} \ + --dtype ${model_dtype} \ + --tp_size ${model_tp_size} \ + --output_dir ${trt_model_path} \ + --qformat fp8 \ + --kv_cache_dtype fp8 \ + --calib_size 2 + +else + + echo "Key 'fp8' does not exist in common params. 
Use convert_checkpoint.py" + python3 convert_checkpoint.py \ + --model_dir ${model_path} \ + --dtype ${model_dtype} \ + --tp_size ${model_tp_size} \ + --output_dir ${trt_model_path} + +fi + + + +trtllm-build \ +--checkpoint_dir=${trt_model_path} \ +--gpt_attention_plugin=${model_dtype} \ +--gemm_plugin=${model_dtype} \ +--remove_input_padding=enable \ +--paged_kv_cache=enable \ +--tp_size=${model_tp_size} \ +--max_batch_size=${max_batch_size} \ +--max_input_len=${max_input_len} \ +--max_output_len=${max_output_len} \ +--max_num_tokens=${max_output_len} \ +--opt_num_tokens=${max_output_len} \ +--output_dir=${trt_engine_path} + +cd /tensorrtllm_backend/triton_model_repo +rm -rf ./tensorrt_llm/1/* +cp -r ${trt_engine_path}/* ./tensorrt_llm/1 +cd /tensorrtllm_backend +python3 scripts/launch_triton_server.py \ +--world_size=${model_tp_size} \ +--model_repo=/tensorrtllm_backend/triton_model_repo & \ No newline at end of file diff --git a/.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh b/.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh new file mode 100644 index 00000000..1168912c --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +set -ex +set -o pipefail + + +main() { + + (which wget && which curl) || (apt-get update && apt-get install -y wget curl) + (which jq) || (apt-get update && apt-get -y install jq) + + if [ ! -f /workspace/buildkite-agent ]; then + echo "buildkite-agent binary not found. Skip plotting the results." + exit 0 + fi + + # initial annotation + description="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-descriptions.md" + + # download results + cd $VLLM_SOURCE_CODE_LOC/benchmarks + mkdir -p results/ + /workspace/buildkite-agent artifact download 'results/*nightly_results.json' results/ + ls + ls results/ + + # generate figures + python3 -m pip install tabulate pandas matplotlib + python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \ + --description $description \ + --results-folder results/ + + # upload results and figures + /workspace/buildkite-agent artifact upload "nightly_results.png" + /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-pipeline.yaml + /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/tests/nightly-tests.json + /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly_results.md +} + +main "$@" \ No newline at end of file diff --git a/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py b/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py new file mode 100644 index 00000000..e5cfcc64 --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py @@ -0,0 +1,135 @@ +import argparse +import json +import math +from pathlib import Path + +import matplotlib.pyplot as plt +import pandas as pd +from tabulate import tabulate + + +def parse_arguments(): + parser = argparse.ArgumentParser( + description= + 'Parse command line arguments for summary-nightly-results script.') + parser.add_argument('--results-folder', + type=str, + required=True, + help='The folder where the results are stored.') + parser.add_argument('--description', + type=str, + required=True, + help='Description of the results.') + + args = parser.parse_args() + return args + + +def main(args): + bar_colors = ['#56B4E9', '#009E73', '#D55E00', '#E69F00'] + results_folder = 
Path(args.results_folder) + + results = [] + + # collect results + for test_file in results_folder.glob("*_nightly_results.json"): + with open(test_file, "r") as f: + results = results + json.loads(f.read()) + + # generate markdown table + df = pd.DataFrame.from_dict(results) + + md_table = tabulate(df, headers='keys', tablefmt='pipe', showindex=False) + + with open(args.description, "r") as f: + description = f.read() + + description = description.format( + nightly_results_benchmarking_table=md_table) + + with open("nightly_results.md", "w") as f: + f.write(description) + + plt.rcParams.update({'font.size': 20}) + + # plot results + fig, axes = plt.subplots(3, 3, figsize=(16, 14)) + fig.subplots_adjust(hspace=1) + methods = ["vllm", "trt", "lmdeploy", "tgi"] + for i, model in enumerate(["llama8B", "llama70B", "mixtral8x7B"]): + for j, metric in enumerate(["TTFT", "ITL"]): + means, stds = [], [] + for method in methods: + target = df['Test name'].str.contains(model) + target = target & df['Engine'].str.contains(method) + filtered_df = df[target] + + if filtered_df.empty: + means.append(0.) + stds.append(0.) + else: + means.append(filtered_df[f"Mean {metric} (ms)"].values[0]) + std = filtered_df[f"Std {metric} (ms)"].values[0] + success = filtered_df["Successful req."].values[0] + stds.append(std / math.sqrt(success)) + + print(model, metric) + print(means, stds) + + ax = axes[i, j + 1] + + bars = ax.bar( + ["vllm", "trt", "lmdeploy", "tgi"], + means, + yerr=stds, + capsize=10, + ) + for idx, bar in enumerate(bars): + bar.set_color(bar_colors[idx]) + ax.set_ylim(bottom=0) + + ax.set_ylabel(f"{metric} (ms)") + ax.set_title(f"{model} {metric}") + ax.grid(axis='y') + + metric = "Tput" + j = 0 + if True: + tputs = [] + for method in methods: + target = df['Test name'].str.contains(model) + target = target & df['Engine'].str.contains(method) + filtered_df = df[target] + + if filtered_df.empty: + tputs.append(0.) + else: + input_tput = filtered_df["Input Tput (tok/s)"].values[0] + output_tput = filtered_df["Output Tput (tok/s)"].values[0] + tputs.append(input_tput + output_tput) + + print(model, metric) + print(tputs) + + ax = axes[i, j] + + bars = ax.bar( + ["vllm", "trt", "lmdeploy", "tgi"], + tputs, + ) + for idx, bar in enumerate(bars): + bar.set_color(bar_colors[idx]) + + ax.set_ylim(bottom=0) + + ax.set_ylabel("Tput (token/s)") + ax.set_title(f"{model} {metric}") + ax.grid(axis='y') + + fig.tight_layout() + fig.savefig("nightly_results.png", bbox_inches='tight', dpi=400) + + +if __name__ == '__main__': + args = parse_arguments() + main(args) diff --git a/.buildkite/nightly-benchmarks/scripts/run-lmdeploy-nightly.sh b/.buildkite/nightly-benchmarks/scripts/run-lmdeploy-nightly.sh new file mode 100644 index 00000000..d6f112aa --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/run-lmdeploy-nightly.sh @@ -0,0 +1,218 @@ +#!/bin/bash + +set -o pipefail + +check_gpus() { + # check the number of GPUs and GPU type. + declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l) + if [[ $gpu_count -gt 0 ]]; then + echo "GPU found." + else + echo "Need at least 1 GPU to run benchmarking." + exit 1 + fi + declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}') + echo "GPU type is $gpu_type" +} + +kill_gpu_processes() { + pkill lmdeploy || true + # waiting for GPU processes to be fully killed + sleep 10 + # Print the GPU memory usage + # so that we know if all GPU processes are killed. 
+ gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0) + # The memory usage should be 0 MB. + echo "GPU 0 Memory Usage: $gpu_memory_usage MB" +} + +json2args() { + # transforms the JSON string to command line args, and '_' is replaced to '-' + # example: + # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 } + # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1 + local json_string=$1 + local args=$( + echo "$json_string" | jq -r ' + to_entries | + map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) | + join(" ") + ' + ) + echo "$args" +} + +wait_for_server() { + # wait for vllm server to start + # return 1 if vllm server crashes + timeout 1200 bash -c ' + until curl -s localhost:8000/v1/completions > /dev/null; do + sleep 1 + done' && return 0 || return 1 +} + +run_serving_tests() { + # run serving tests using `benchmark_serving.py` + # $1: a json file specifying serving test cases + + local serving_test_file + serving_test_file=$1 + + # Iterate over serving tests + jq -c '.[]' "$serving_test_file" | while read -r params; do + # get the test name, and append the GPU type back to it. + test_name=$(echo "$params" | jq -r '.test_name') + + # if TEST_SELECTOR is set, only run the test cases that match the selector + if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then + echo "Skip test case $test_name." + continue + fi + + # append lmdeploy to the test name + test_name=lmdeploy_$test_name + + # get common parameters + common_params=$(echo "$params" | jq -r '.common_parameters') + model=$(echo "$common_params" | jq -r '.model') + tp=$(echo "$common_params" | jq -r '.tp') + dataset_name=$(echo "$common_params" | jq -r '.dataset_name') + dataset_path=$(echo "$common_params" | jq -r '.dataset_path') + port=$(echo "$common_params" | jq -r '.port') + num_prompts=$(echo "$common_params" | jq -r '.num_prompts') + + + + # get client and server arguments + server_params=$(echo "$params" | jq -r '.lmdeploy_server_parameters') + client_params=$(echo "$params" | jq -r '.lmdeploy_client_parameters') + server_args=$(json2args "$server_params") + client_args=$(json2args "$client_params") + qps_list=$(echo "$params" | jq -r '.qps_list') + qps_list=$(echo "$qps_list" | jq -r '.[] | @sh') + echo "Running over qps list $qps_list" + + # check if there is enough GPU to run the test + if [[ $gpu_count -lt $tp ]]; then + echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name." + continue + fi + + # prepare tokenizer + rm -rf /tokenizer_cache + mkdir /tokenizer_cache + python ../.buildkite/nightly-benchmarks/scripts/download-tokenizer.py \ + --model "$model" \ + --cachedir /tokenizer_cache + + server_command="lmdeploy serve api_server $model \ + --tp $tp \ + --server-port $port \ + $server_args" + + # run the server + echo "Running test case $test_name" + echo "Server command: $server_command" + bash -c "$server_command" & + + # wait until the server is alive + wait_for_server + if [ $? -eq 0 ]; then + echo "" + echo "lmdeploy server is up and running." + else + echo "" + echo "lmdeploy failed to start within the timeout period." 
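+            # 'break' skips the remaining lmdeploy test cases in this file rather than retrying: a wait_for_server timeout means the server never came up.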
+ break + fi + + # get model name + model_name=$(python ../.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py) + + # iterate over different QPS + for qps in $qps_list; do + # remove the surrounding single quote from qps + if [[ "$qps" == *"inf"* ]]; then + echo "qps was $qps" + qps="inf" + echo "now qps is $qps" + fi + + new_test_name=$test_name"_qps_"$qps + + client_command="python3 benchmark_serving.py \ + --backend lmdeploy \ + --tokenizer /tokenizer_cache \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --num-prompts $num_prompts \ + --port $port \ + --save-result \ + --result-dir $RESULTS_FOLDER \ + --result-filename ${new_test_name}.json \ + --request-rate $qps \ + --model \"$model_name\" \ + $client_args" + + echo "Running test case $test_name with qps $qps" + echo "Client command: $client_command" + + eval "$client_command" + + # record the benchmarking commands + jq_output=$(jq -n \ + --arg server "$server_command" \ + --arg client "$client_command" \ + --arg gpu "$gpu_type" \ + --arg engine "lmdeploy" \ + '{ + server_command: $server, + client_command: $client, + gpu_type: $gpu, + engine: $engine + }') + echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands" + + done + + # clean up + kill_gpu_processes + rm -rf /root/.cache/huggingface/* + done +} + + +upload_to_buildkite() { + # upload the benchmarking results to buildkite + + # if the agent binary is not found, skip uploading the results, exit 0 + if [ ! -f /workspace/buildkite-agent ]; then + echo "buildkite-agent binary not found. Skip uploading the results." + return 0 + fi + # /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md + /workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*" +} + + +main() { + + check_gpus + # enter vllm directory + cd $VLLM_SOURCE_CODE_LOC/benchmarks + + declare -g RESULTS_FOLDER=results/ + mkdir -p $RESULTS_FOLDER + BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/ + + python -m pip install transformers==4.41.2 + + export CURRENT_LLM_SERVING_ENGINE=lmdeploy + run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json + python -m pip install tabulate pandas + python $BENCHMARK_ROOT/scripts/summary-nightly-results.py + upload_to_buildkite + +} + +main "$@" diff --git a/.buildkite/nightly-benchmarks/scripts/run-tgi-nightly.sh b/.buildkite/nightly-benchmarks/scripts/run-tgi-nightly.sh new file mode 100644 index 00000000..fed03654 --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/run-tgi-nightly.sh @@ -0,0 +1,216 @@ +#!/bin/bash + +set -o pipefail + +check_gpus() { + # check the number of GPUs and GPU type. + declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l) + if [[ $gpu_count -gt 0 ]]; then + echo "GPU found." + else + echo "Need at least 1 GPU to run benchmarking." + exit 1 + fi + declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}') + echo "GPU type is $gpu_type" +} + +kill_gpu_processes() { + pkill text-generation || true + # waiting for GPU processes to be fully killed + sleep 10 + # Print the GPU memory usage + # so that we know if all GPU processes are killed. + gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0) + # The memory usage should be 0 MB. 
+ echo "GPU 0 Memory Usage: $gpu_memory_usage MB" +} + +json2args() { + # transforms the JSON string to command line args, and '_' is replaced to '-' + # example: + # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 } + # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1 + local json_string=$1 + local args=$( + echo "$json_string" | jq -r ' + to_entries | + map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) | + join(" ") + ' + ) + echo "$args" +} + +wait_for_server() { + timeout 1200 bash -c ' + until curl -s localhost:8000/generate_stream > /dev/null; do + sleep 1 + done' && return 0 || return 1 +} + +run_serving_tests() { + # run serving tests using `benchmark_serving.py` + # $1: a json file specifying serving test cases + + local serving_test_file + serving_test_file=$1 + + # Iterate over serving tests + jq -c '.[]' "$serving_test_file" | while read -r params; do + # get the test name, and append the GPU type back to it. + test_name=$(echo "$params" | jq -r '.test_name') + + + # if TEST_SELECTOR is set, only run the test cases that match the selector + if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then + echo "Skip test case $test_name." + continue + fi + + # append tgi to the test name + test_name=tgi_$test_name + + # get common parameters + common_params=$(echo "$params" | jq -r '.common_parameters') + model=$(echo "$common_params" | jq -r '.model') + tp=$(echo "$common_params" | jq -r '.tp') + dataset_name=$(echo "$common_params" | jq -r '.dataset_name') + dataset_path=$(echo "$common_params" | jq -r '.dataset_path') + port=$(echo "$common_params" | jq -r '.port') + num_prompts=$(echo "$common_params" | jq -r '.num_prompts') + + # get client and server arguments + server_params=$(echo "$params" | jq -r '.tgi_server_parameters') + client_params=$(echo "$params" | jq -r '.tgi_client_parameters') + server_args=$(json2args "$server_params") + client_args=$(json2args "$client_params") + qps_list=$(echo "$params" | jq -r '.qps_list') + qps_list=$(echo "$qps_list" | jq -r '.[] | @sh') + echo "Running over qps list $qps_list" + + # check if there is enough GPU to run the test + if [[ $gpu_count -lt $tp ]]; then + echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name." + continue + fi + + if echo "$common_params" | jq -e 'has("fp8")' > /dev/null; then + echo "Key 'fp8' exists in common params." + server_command="/tgi-entrypoint.sh \ + --model-id $model \ + --num-shard $tp \ + --port $port \ + --quantize fp8 \ + $server_args" + else + echo "Key 'fp8' does not exist in common params." + server_command="/tgi-entrypoint.sh \ + --model-id $model \ + --num-shard $tp \ + --port $port \ + $server_args" + fi + + + + + # run the server + echo "Running test case $test_name" + echo "Server command: $server_command" + eval "$server_command" & + + # wait until the server is alive + wait_for_server + if [ $? -eq 0 ]; then + echo "" + echo "tgi server is up and running." + else + echo "" + echo "tgi failed to start within the timeout period." 
+ break + fi + + # iterate over different QPS + for qps in $qps_list; do + # remove the surrounding single quote from qps + if [[ "$qps" == *"inf"* ]]; then + echo "qps was $qps" + qps="inf" + echo "now qps is $qps" + fi + + new_test_name=$test_name"_qps_"$qps + + client_command="python3 benchmark_serving.py \ + --backend tgi \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --num-prompts $num_prompts \ + --port $port \ + --save-result \ + --result-dir $RESULTS_FOLDER \ + --result-filename ${new_test_name}.json \ + --request-rate $qps \ + $client_args" + + echo "Running test case $test_name with qps $qps" + echo "Client command: $client_command" + + eval "$client_command" + + # record the benchmarking commands + jq_output=$(jq -n \ + --arg server "$server_command" \ + --arg client "$client_command" \ + --arg gpu "$gpu_type" \ + --arg engine "tgi" \ + '{ + server_command: $server, + client_command: $client, + gpu_type: $gpu, + engine: $engine + }') + echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands" + + done + + # clean up + kill_gpu_processes + rm -rf /root/.cache/huggingface/* + done +} + + + +upload_to_buildkite() { + # upload the benchmarking results to buildkite + + # if the agent binary is not found, skip uploading the results, exit 0 + if [ ! -f /workspace/buildkite-agent ]; then + echo "buildkite-agent binary not found. Skip uploading the results." + return 0 + fi + # /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md + /workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*" +} + +main() { + + check_gpus + # enter vllm directory + cd $VLLM_SOURCE_CODE_LOC/benchmarks + declare -g RESULTS_FOLDER=results/ + mkdir -p $RESULTS_FOLDER + BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/ + + export CURRENT_LLM_SERVING_ENGINE=tgi + run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json + python -m pip install tabulate pandas + python $BENCHMARK_ROOT/scripts/summary-nightly-results.py + upload_to_buildkite + +} + +main "$@" diff --git a/.buildkite/nightly-benchmarks/scripts/run-trt-nightly.sh b/.buildkite/nightly-benchmarks/scripts/run-trt-nightly.sh new file mode 100644 index 00000000..4a82b9ec --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/run-trt-nightly.sh @@ -0,0 +1,214 @@ +#!/bin/bash + +set -o pipefail + +check_gpus() { + # check the number of GPUs and GPU type. + declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l) + if [[ $gpu_count -gt 0 ]]; then + echo "GPU found." + else + echo "Need at least 1 GPU to run benchmarking." + exit 1 + fi + declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}') + echo "GPU type is $gpu_type" +} + +kill_gpu_processes() { + pkill tritonserver || true + # waiting for GPU processes to be fully killed + sleep 20 + # Print the GPU memory usage + # so that we know if all GPU processes are killed. + gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0) + # The memory usage should be 0 MB. 
+ echo "GPU 0 Memory Usage: $gpu_memory_usage MB" +} + +json2args() { + # transforms the JSON string to command line args, and '_' is replaced to '-' + # example: + # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 } + # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1 + local json_string=$1 + local args=$( + echo "$json_string" | jq -r ' + to_entries | + map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) | + join(" ") + ' + ) + echo "$args" +} + +wait_for_server() { + timeout 1200 bash -c ' + until curl -s localhost:8000/generate_stream > /dev/null; do + sleep 1 + done' && return 0 || return 1 +} + +run_serving_tests() { + # run serving tests using `benchmark_serving.py` + # $1: a json file specifying serving test cases + + local serving_test_file + serving_test_file=$1 + + # Iterate over serving tests + jq -c '.[]' "$serving_test_file" | while read -r params; do + # get the test name, and append the GPU type back to it. + test_name=$(echo "$params" | jq -r '.test_name') + + # if TEST_SELECTOR is set, only run the test cases that match the selector + if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then + echo "Skip test case $test_name." + continue + fi + + # append trt to the test name + test_name=trt_$test_name + + # get common parameters + common_params=$(echo "$params" | jq -r '.common_parameters') + model=$(echo "$common_params" | jq -r '.model') + tp=$(echo "$common_params" | jq -r '.tp') + dataset_name=$(echo "$common_params" | jq -r '.dataset_name') + dataset_path=$(echo "$common_params" | jq -r '.dataset_path') + port=$(echo "$common_params" | jq -r '.port') + num_prompts=$(echo "$common_params" | jq -r '.num_prompts') + + # get client and server arguments + server_params=$(echo "$params" | jq -r '.trt_server_parameters') + client_params=$(echo "$params" | jq -r '.trt_client_parameters') + client_args=$(json2args "$client_params") + qps_list=$(echo "$params" | jq -r '.qps_list') + qps_list=$(echo "$qps_list" | jq -r '.[] | @sh') + echo "Running over qps list $qps_list" + + # check if there is enough GPU to run the test + if [[ $gpu_count -lt $tp ]]; then + echo "Required model_tp_size $tp but only $gpu_count GPU found. Skip testcase $test_name." + continue + fi + + + + cd $VLLM_SOURCE_CODE_LOC/benchmarks + + + echo "Running test case $test_name" + bash ../.buildkite/nightly-benchmarks/scripts/launch-trt-server.sh "$server_params" "$common_params" + + # wait until the server is alive + wait_for_server + if [ $? -eq 0 ]; then + echo "" + echo "trt server is up and running." + else + echo "" + echo "trt failed to start within the timeout period." 
+ break + fi + + # prepare tokenizer + cd $VLLM_SOURCE_CODE_LOC/benchmarks + rm -rf /tokenizer_cache + mkdir /tokenizer_cache + python ../.buildkite/nightly-benchmarks/scripts/download-tokenizer.py \ + --model "$model" \ + --cachedir /tokenizer_cache + cd $VLLM_SOURCE_CODE_LOC/benchmarks + + + # iterate over different QPS + for qps in $qps_list; do + # remove the surrounding single quote from qps + if [[ "$qps" == *"inf"* ]]; then + echo "qps was $qps" + qps="inf" + echo "now qps is $qps" + fi + + new_test_name=$test_name"_qps_"$qps + + client_command="python3 benchmark_serving.py \ + --backend tensorrt-llm \ + --tokenizer /tokenizer_cache \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --num-prompts $num_prompts \ + --port $port \ + --save-result \ + --result-dir $RESULTS_FOLDER \ + --result-filename ${new_test_name}.json \ + --request-rate $qps \ + $client_args" + + echo "Running test case $test_name with qps $qps" + echo "Client command: $client_command" + + eval "$client_command" + + server_command="" + # record the benchmarking commands + jq_output=$(jq -n \ + --arg server "$server_command" \ + --arg client "$client_command" \ + --arg gpu "$gpu_type" \ + --arg engine "trt" \ + '{ + server_command: $server, + client_command: $client, + gpu_type: $gpu, + engine: $engine + }') + echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands" + + done + + # clean up + kill_gpu_processes + rm -rf /root/.cache/huggingface/* + done +} + +upload_to_buildkite() { + # upload the benchmarking results to buildkite + + # if the agent binary is not found, skip uploading the results, exit 0 + if [ ! -f /workspace/buildkite-agent ]; then + echo "buildkite-agent binary not found. Skip uploading the results." + return 0 + fi + # /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md + /workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*" +} + + +main() { + + check_gpus + + + # enter vllm directory + cd $VLLM_SOURCE_CODE_LOC/benchmarks + + declare -g RESULTS_FOLDER=results/ + mkdir -p $RESULTS_FOLDER + BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/ + + # update transformers package, to make sure mixtral tokenizer is available + python -m pip install transformers -U + + export CURRENT_LLM_SERVING_ENGINE=trt + run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json + python -m pip install tabulate pandas + python $BENCHMARK_ROOT/scripts/summary-nightly-results.py + upload_to_buildkite + +} + +main "$@" diff --git a/.buildkite/nightly-benchmarks/scripts/run-vllm-nightly.sh b/.buildkite/nightly-benchmarks/scripts/run-vllm-nightly.sh new file mode 100644 index 00000000..663045b8 --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/run-vllm-nightly.sh @@ -0,0 +1,221 @@ +#!/bin/bash + +set -o pipefail + +check_gpus() { + # check the number of GPUs and GPU type. + declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l) + if [[ $gpu_count -gt 0 ]]; then + echo "GPU found." + else + echo "Need at least 1 GPU to run benchmarking." + exit 1 + fi + declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}') + echo "GPU type is $gpu_type" +} + +kill_gpu_processes() { + # kill all processes on GPU. + pkill pt_main_thread + sleep 10 + + # remove vllm config file + rm -rf ~/.config/vllm + + # Print the GPU memory usage + # so that we know if all GPU processes are killed. 
+ gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0) + # The memory usage should be 0 MB. + echo "GPU 0 Memory Usage: $gpu_memory_usage MB" +} + +json2args() { + # transforms the JSON string to command line args, and '_' is replaced to '-' + # example: + # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 } + # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1 + local json_string=$1 + local args=$( + echo "$json_string" | jq -r ' + to_entries | + map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) | + join(" ") + ' + ) + echo "$args" +} + +wait_for_server() { + # wait for vllm server to start + # return 1 if vllm server crashes + timeout 1200 bash -c ' + until curl -s localhost:8000/v1/completions > /dev/null; do + sleep 1 + done' && return 0 || return 1 +} + +run_serving_tests() { + # run serving tests using `benchmark_serving.py` + # $1: a json file specifying serving test cases + + local serving_test_file + serving_test_file=$1 + + # Iterate over serving tests + jq -c '.[]' "$serving_test_file" | while read -r params; do + # get the test name, and append the GPU type back to it. + test_name=$(echo "$params" | jq -r '.test_name') + + # if TEST_SELECTOR is set, only run the test cases that match the selector + if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then + echo "Skip test case $test_name." + continue + fi + + # append vllm to the test name + test_name=vllm_$test_name + + + # get common parameters + common_params=$(echo "$params" | jq -r '.common_parameters') + model=$(echo "$common_params" | jq -r '.model') + tp=$(echo "$common_params" | jq -r '.tp') + dataset_name=$(echo "$common_params" | jq -r '.dataset_name') + dataset_path=$(echo "$common_params" | jq -r '.dataset_path') + port=$(echo "$common_params" | jq -r '.port') + num_prompts=$(echo "$common_params" | jq -r '.num_prompts') + + # get client and server arguments + server_params=$(echo "$params" | jq -r '.vllm_server_parameters') + client_params=$(echo "$params" | jq -r '.vllm_client_parameters') + server_args=$(json2args "$server_params") + client_args=$(json2args "$client_params") + qps_list=$(echo "$params" | jq -r '.qps_list') + qps_list=$(echo "$qps_list" | jq -r '.[] | @sh') + echo "Running over qps list $qps_list" + + # check if there is enough GPU to run the test + if [[ $gpu_count -lt $tp ]]; then + echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name." + continue + fi + + if echo "$common_params" | jq -e 'has("fp8")' > /dev/null; then + echo "Key 'fp8' exists in common params. Use neuralmagic fp8 model for convenience." + model=$(echo "$common_params" | jq -r '.neuralmagic_quantized_model') + server_command="python3 \ + -m vllm.entrypoints.openai.api_server \ + -tp $tp \ + --model $model \ + --port $port \ + $server_args" + else + echo "Key 'fp8' does not exist in common params." + server_command="python3 \ + -m vllm.entrypoints.openai.api_server \ + -tp $tp \ + --model $model \ + --port $port \ + $server_args" + fi + + # run the server + echo "Running test case $test_name" + echo "Server command: $server_command" + eval "$server_command" & + + # wait until the server is alive + wait_for_server + if [ $? -eq 0 ]; then + echo "" + echo "vllm server is up and running." + else + echo "" + echo "vllm failed to start within the timeout period." 
+ break + fi + + # iterate over different QPS + for qps in $qps_list; do + # remove the surrounding single quote from qps + if [[ "$qps" == *"inf"* ]]; then + echo "qps was $qps" + qps="inf" + echo "now qps is $qps" + fi + + new_test_name=$test_name"_qps_"$qps + + client_command="python3 benchmark_serving.py \ + --backend vllm \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --num-prompts $num_prompts \ + --port $port \ + --save-result \ + --result-dir $RESULTS_FOLDER \ + --result-filename ${new_test_name}.json \ + --request-rate $qps \ + $client_args" + + echo "Running test case $test_name with qps $qps" + echo "Client command: $client_command" + + eval "$client_command" + + # record the benchmarking commands + jq_output=$(jq -n \ + --arg server "$server_command" \ + --arg client "$client_command" \ + --arg gpu "$gpu_type" \ + --arg engine "vllm" \ + '{ + server_command: $server, + client_command: $client, + gpu_type: $gpu, + engine: $engine + }') + echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands" + + done + + # clean up + kill_gpu_processes + rm -rf /root/.cache/huggingface/* + done +} + + +upload_to_buildkite() { + # upload the benchmarking results to buildkite + + # if the agent binary is not found, skip uploading the results, exit 0 + if [ ! -f /workspace/buildkite-agent ]; then + echo "buildkite-agent binary not found. Skip uploading the results." + return 0 + fi + # /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md + /workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*" +} + +main() { + + check_gpus + # enter vllm directory + cd $VLLM_SOURCE_CODE_LOC/benchmarks + declare -g RESULTS_FOLDER=results/ + mkdir -p $RESULTS_FOLDER + BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/ + + export CURRENT_LLM_SERVING_ENGINE=vllm + run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json + + python3 -m pip install tabulate pandas + python3 $BENCHMARK_ROOT/scripts/summary-nightly-results.py + upload_to_buildkite + +} + +main "$@" diff --git a/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py b/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py new file mode 100644 index 00000000..782d1ef9 --- /dev/null +++ b/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py @@ -0,0 +1,76 @@ +import datetime +import json +import os +from pathlib import Path + +import pandas as pd +from tabulate import tabulate + +results_folder = Path("results/") + +# serving results and the keys that will be printed into markdown +serving_results = [] +serving_column_mapping = { + "test_name": "Test name", + "gpu_type": "GPU", + "completed": "Successful req.", + "request_throughput": "Tput (req/s)", + "mean_ttft_ms": "Mean TTFT (ms)", + "std_ttft_ms": "Std TTFT (ms)", + "mean_itl_ms": "Mean ITL (ms)", + "std_itl_ms": "Std ITL (ms)", + "input_throughput": "Input Tput (tok/s)", + "output_throughput": "Output Tput (tok/s)", + "engine": "Engine", +} + +if __name__ == "__main__": + + # collect results + for test_file in results_folder.glob("*.json"): + + with open(test_file, "r") as f: + raw_result = json.loads(f.read()) + + # attach the benchmarking command to raw_result + with open(test_file.with_suffix(".commands"), "r") as f: + command = json.loads(f.read()) + raw_result.update(command) + + # update the test name of this result + raw_result.update({"test_name": test_file.stem}) + + # add the result to raw_result + 
serving_results.append(raw_result) + continue + + serving_results = pd.DataFrame.from_dict(serving_results) + + if not serving_results.empty: + serving_results = serving_results[list( + serving_column_mapping.keys())].rename( + columns=serving_column_mapping) + + serving_md_table_with_headers = tabulate(serving_results, + headers='keys', + tablefmt='pipe', + showindex=False) + # remove the first line of header + serving_md_table_lines = serving_md_table_with_headers.split('\n') + serving_md_table_without_header = '\n'.join(serving_md_table_lines[2:]) + + prefix = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + prefix = prefix + "_" + os.environ.get("CURRENT_LLM_SERVING_ENGINE") + + # document benchmarking results in markdown + with open(results_folder / f"{prefix}_nightly_results.md", "w") as f: + # document results with header. + # for those who wants to reproduce our benchmark. + f.write(serving_md_table_with_headers) + f.write('\n') + + # document benchmarking results in json + with open(results_folder / f"{prefix}_nightly_results.json", "w") as f: + + results = serving_results.to_dict(orient='records') + f.write(json.dumps(results)) diff --git a/.buildkite/nightly-benchmarks/tests/nightly-tests.json b/.buildkite/nightly-benchmarks/tests/nightly-tests.json new file mode 100644 index 00000000..f250833c --- /dev/null +++ b/.buildkite/nightly-benchmarks/tests/nightly-tests.json @@ -0,0 +1,116 @@ +[ + { + "test_name": "llama8B_tp1", + "qps_list": [4], + "common_parameters": { + "model": "meta-llama/Meta-Llama-3-8B", + "tp": 1, + "dataset_name": "sharegpt", + "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json", + "num_prompts": 500, + "port": 8000 + }, + "lmdeploy_server_parameters": { + }, + "lmdeploy_client_parameters": { + }, + "tgi_server_parameters": { + }, + "tgi_client_parameters": { + "endpoint": "/generate_stream" + }, + "trt_server_parameters": { + "model_type": "llama", + "model_dtype": "float16", + "max_batch_size": 256, + "max_input_len": 4096, + "max_output_len": 4096, + "trt_llm_version": "r24.04" + }, + "trt_client_parameters": { + "endpoint": "/v2/models/ensemble/generate_stream" + }, + "vllm_server_parameters": { + "disable_log_stats": "", + "disable_log_requests": "" + }, + "vllm_client_parameters": { + } + }, + { + "test_name": "llama70B_tp4", + "qps_list": [2], + "common_parameters": { + "model": "meta-llama/Meta-Llama-3-70B-Instruct", + "tp": 4, + "dataset_name": "sharegpt", + "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json", + "num_prompts": 500, + "port": 8000 + }, + "lmdeploy_server_parameters": { + }, + "lmdeploy_client_parameters": { + }, + "tgi_server_parameters": { + }, + "tgi_client_parameters": { + "endpoint": "/generate_stream" + }, + "trt_server_parameters": { + "model_type": "llama", + "model_dtype": "float16", + "max_batch_size": 256, + "max_input_len": 4096, + "max_output_len": 4096, + "trt_llm_version": "r24.04" + }, + "trt_client_parameters": { + "endpoint": "/v2/models/ensemble/generate_stream" + }, + "vllm_server_parameters": { + "disable_log_stats": "", + "disable_log_requests": "" + }, + "vllm_client_parameters": { + } + }, + { + "test_name": "mixtral8x7B_tp2", + "qps_list": [2], + "common_parameters": { + "model": "mistralai/Mixtral-8x7B-Instruct-v0.1", + "tp": 2, + "dataset_name": "sharegpt", + "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json", + "num_prompts": 500, + "port": 8000 + }, + "lmdeploy_server_parameters": { + }, + "lmdeploy_client_parameters": { + }, + "tgi_server_parameters": { + }, + 
"tgi_client_parameters": { + "endpoint": "/generate_stream" + }, + "trt_server_parameters": { + "model_type": "llama", + "model_dtype": "float16", + "max_batch_size": 256, + "max_input_len": 4096, + "max_output_len": 4096, + "trt_llm_version": "r24.04" + }, + "trt_client_parameters": { + "endpoint": "/v2/models/ensemble/generate_stream" + }, + "vllm_server_parameters": { + "disable_log_stats": "", + "disable_log_requests": "" + }, + "vllm_client_parameters": { + } + } +] \ No newline at end of file diff --git a/README.md b/README.md index dac4b513..53285356 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,8 @@ vLLM is fast with: - Quantization: [GPTQ](https://arxiv.org/abs/2210.17323), [AWQ](https://arxiv.org/abs/2306.00978), [SqueezeLLM](https://arxiv.org/abs/2306.07629), FP8 KV Cache - Optimized CUDA kernels +**Performance benchmark**: We include a [performance benchmark](https://buildkite.com/vllm/performance-benchmark/builds/3924) that compares the performance of vllm against other LLM serving engines ([TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM), [text-generation-inference](https://github.com/huggingface/text-generation-inference) and [lmdeploy](https://github.com/InternLM/lmdeploy)). + vLLM is flexible and easy to use with: - Seamless integration with popular Hugging Face models diff --git a/benchmarks/benchmark_serving.py b/benchmarks/benchmark_serving.py index 7ba97714..b2924b9e 100644 --- a/benchmarks/benchmark_serving.py +++ b/benchmarks/benchmark_serving.py @@ -60,12 +60,15 @@ class BenchmarkMetrics: output_throughput: float mean_ttft_ms: float median_ttft_ms: float + std_ttft_ms: float p99_ttft_ms: float mean_tpot_ms: float median_tpot_ms: float + std_tpot_ms: float p99_tpot_ms: float mean_itl_ms: float median_itl_ms: float + std_itl_ms: float p99_itl_ms: float @@ -274,12 +277,15 @@ def calculate_metrics( mean_ttft_ms=np.mean(ttfts or 0) * 1000, # ttfts is empty if streaming is not supported by backend median_ttft_ms=np.median(ttfts or 0) * 1000, + std_ttft_ms=np.std(ttfts or 0) * 1000, p99_ttft_ms=np.percentile(ttfts or 0, 99) * 1000, mean_tpot_ms=np.mean(tpots or 0) * 1000, median_tpot_ms=np.median(tpots or 0) * 1000, + std_tpot_ms=np.std(tpots or 0) * 1000, p99_tpot_ms=np.percentile(tpots or 0, 99) * 1000, mean_itl_ms=np.mean(itls or 0) * 1000, median_itl_ms=np.median(itls or 0) * 1000, + std_itl_ms=np.std(itls or 0) * 1000, p99_itl_ms=np.percentile(itls or 0, 99) * 1000, ) @@ -396,12 +402,15 @@ async def benchmark( "output_throughput": metrics.output_throughput, "mean_ttft_ms": metrics.mean_ttft_ms, "median_ttft_ms": metrics.median_ttft_ms, + "std_ttft_ms": metrics.std_ttft_ms, "p99_ttft_ms": metrics.p99_ttft_ms, "mean_tpot_ms": metrics.mean_tpot_ms, "median_tpot_ms": metrics.median_tpot_ms, + "std_tpot_ms": metrics.std_tpot_ms, "p99_tpot_ms": metrics.p99_tpot_ms, "mean_itl_ms": metrics.mean_itl_ms, "median_itl_ms": metrics.median_itl_ms, + "std_itl_ms": metrics.std_itl_ms, "p99_itl_ms": metrics.p99_itl_ms, "input_lens": [output.prompt_len for output in outputs], "output_lens": actual_output_lens,