[Metrics] add more metrics (#4464)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Co-authored-by: Robert Shaw <rshaw@neuralmagic.com>
Co-authored-by: DarkLight1337 <tlleungac@connect.ust.hk>
harrywu 2024-11-12 00:17:38 +08:00 committed by GitHub
parent 2cebda42bb
commit 874f551b36
4 changed files with 437 additions and 50 deletions

View File

@ -1,33 +1,4 @@
{
"__inputs": [
],
"__elements": {},
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "10.4.2"
},
{
"type": "panel",
"id": "heatmap",
"name": "Heatmap",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
}
],
"annotations": {
"list": [
{
@ -54,7 +25,7 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": null,
"id": 1,
"links": [],
"liveNow": false,
"panels": [
@ -76,6 +47,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@ -241,6 +213,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@ -358,6 +331,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@ -523,6 +497,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@ -658,6 +633,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@ -823,6 +799,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@ -984,7 +961,7 @@
"unit": "none"
}
},
"pluginVersion": "10.4.2",
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
@ -1076,7 +1053,7 @@
"unit": "none"
}
},
"pluginVersion": "10.4.2",
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
@ -1117,6 +1094,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@ -1147,8 +1125,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
"color": "green"
},
{
"color": "red",
@ -1199,6 +1176,319 @@
],
"title": "Finish Reason",
"type": "timeseries"
},
{
"datasource": {
"default": false,
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "seconds",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 32
},
"id": 14,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "edx8memhpd9tsa"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "rate(vllm:request_queue_time_seconds_sum{model_name=\"$model_name\"}[$__rate_interval])",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Queue Time",
"type": "timeseries"
},
{
"datasource": {
"default": false,
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 40
},
"id": 15,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "edx8memhpd9tsa"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "rate(vllm:request_prefill_time_seconds_sum{model_name=\"$model_name\"}[$__rate_interval])",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "Prefill",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "rate(vllm:request_decode_time_seconds_sum{model_name=\"$model_name\"}[$__rate_interval])",
"hide": false,
"instant": false,
"legendFormat": "Decode",
"range": true,
"refId": "B"
}
],
"title": "Requests Prefill and Decode Time",
"type": "timeseries"
},
{
"datasource": {
"default": false,
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 40
},
"id": 16,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "edx8memhpd9tsa"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "rate(vllm:request_max_num_generation_tokens_sum{model_name=\"$model_name\"}[$__rate_interval])",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "Tokens",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Max Generation Token in Sequence Group",
"type": "timeseries"
}
],
"refresh": "",
@ -1207,21 +1497,34 @@
"templating": {
"list": [
{
"type": "datasource",
"name": "DS_PROMETHEUS",
"label": "datasource",
"current": {},
"current": {
"selected": false,
"text": "prometheus",
"value": "edx8memhpd9tsa"
},
"hide": 0,
"includeAll": false,
"label": "datasource",
"multi": false,
"name": "DS_PROMETHEUS",
"options": [],
"query": "prometheus",
"queryValue": "",
"refresh": 1,
"regex": "",
"skipUrlSync": false
"skipUrlSync": false,
"type": "datasource"
},
{
"current": {
"selected": false,
"text": "/share/datasets/public_models/Meta-Llama-3-8B-Instruct",
"value": "/share/datasets/public_models/Meta-Llama-3-8B-Instruct"
},
"datasource": {
"type": "prometheus",
"uid": "edx8memhpd9tsa"
},
"definition": "label_values(model_name)",
"hide": 0,
"includeAll": false,
@ -1249,7 +1552,6 @@
"timezone": "",
"title": "vLLM",
"uid": "b281712d-8bff-41ef-9f3f-71ad43c05e9b",
"version": 1,
"version": 8,
"weekStart": ""
}
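
The new Queue Time and Prefill/Decode panels above plot rate(vllm:request_queue_time_seconds_sum[...]) and its siblings, i.e. seconds of queue/prefill/decode time accrued per second across finished requests. Because these metrics are Prometheus histograms, a per-request average can also be derived by dividing the _sum rate by the matching _count rate. A minimal sketch of that query via the Prometheus HTTP API (the server address and model_name value are illustrative; only the series names come from this change):

import requests

PROMETHEUS = "http://localhost:9090/api/v1/query"  # assumed local Prometheus
MODEL = "/share/datasets/public_models/Meta-Llama-3-8B-Instruct"  # example label value

# Mean queue time per finished request over the last 5 minutes:
# rate of accumulated seconds divided by rate of observations.
query = (
    f'rate(vllm:request_queue_time_seconds_sum{{model_name="{MODEL}"}}[5m]) / '
    f'rate(vllm:request_queue_time_seconds_count{{model_name="{MODEL}"}}[5m])'
)

resp = requests.get(PROMETHEUS, params={"query": query}, timeout=10)
resp.raise_for_status()
for series in resp.json()["data"]["result"]:
    print(series["metric"].get("model_name"), series["value"][1])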

View File

@ -1672,6 +1672,7 @@ class LLMEngine:
# Iteration stats
num_prompt_tokens_iter = 0
num_generation_tokens_iter = 0
num_tokens_iter = 0
time_to_first_tokens_iter: List[float] = []
time_per_output_tokens_iter: List[float] = []
num_preemption_iter = (0 if scheduler_outputs is None else
@ -1680,6 +1681,10 @@ class LLMEngine:
# Request stats
# Latency
time_e2e_requests: List[float] = []
time_queue_requests: List[float] = []
time_inference_requests: List[float] = []
time_prefill_requests: List[float] = []
time_decode_requests: List[float] = []
time_in_queue_requests: List[float] = []
model_forward_time_requests: List[float] = []
model_execute_time_requests: List[float] = []
@ -1687,6 +1692,7 @@ class LLMEngine:
num_prompt_tokens_requests: List[int] = []
num_generation_tokens_requests: List[int] = []
n_requests: List[int] = []
max_num_generation_tokens_requests: List[int] = []
max_tokens_requests: List[int] = []
finished_reason_requests: List[str] = []
@ -1777,6 +1783,18 @@ class LLMEngine:
# Latency timings
time_e2e_requests.append(now -
seq_group.metrics.arrival_time)
if (seq_group.metrics.first_scheduled_time is not None and
seq_group.metrics.first_token_time is not None):
time_queue_requests.append(
seq_group.metrics.first_scheduled_time -
seq_group.metrics.arrival_time)
time_prefill_requests.append(
seq_group.metrics.first_token_time -
seq_group.metrics.first_scheduled_time)
time_decode_requests.append(
now - seq_group.metrics.first_token_time)
time_inference_requests.append(
now - seq_group.metrics.first_scheduled_time)
if seq_group.metrics.time_in_queue is not None:
time_in_queue_requests.append(
seq_group.metrics.time_in_queue)
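
The four new per-request latency samples decompose end-to-end latency from timestamps already tracked on seq_group.metrics: queue time up to first_scheduled_time, prefill up to first_token_time, decode until completion, and inference covering prefill plus decode. A standalone sketch of that arithmetic (timestamp values are made up for illustration):

arrival_time = 100.00          # request received
first_scheduled_time = 100.25  # left the WAITING queue
first_token_time = 100.75      # first output token emitted
now = 103.00                   # request finished

time_queue = first_scheduled_time - arrival_time        # 0.25 s
time_prefill = first_token_time - first_scheduled_time  # 0.50 s
time_decode = now - first_token_time                    # 2.25 s
time_inference = now - first_scheduled_time             # 2.75 s
time_e2e = now - arrival_time                           # 3.00 s

# Sanity checks: inference = prefill + decode, e2e = queue + inference.
assert abs(time_inference - (time_prefill + time_decode)) < 1e-9
assert abs(time_e2e - (time_queue + time_inference)) < 1e-9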
@ -1793,6 +1811,9 @@ class LLMEngine:
seq.get_output_len()
for seq in seq_group.get_finished_seqs()
])
max_num_generation_tokens_requests.append(
max(seq.get_output_len()
for seq in seq_group.get_seqs()))
if seq_group.sampling_params is not None:
n_requests.append(seq_group.sampling_params.n)
max_tokens_requests.append(
@ -1811,7 +1832,8 @@ class LLMEngine:
num_generation_tokens_iter = (
actual_num_batched_tokens - num_prompt_tokens_iter +
num_generation_tokens_from_prefill_groups)
num_tokens_iter = (num_generation_tokens_iter +
num_prompt_tokens_iter)
# Spec decode, if enabled, emits specialized metrics from the worker in
# sampler output.
if model_output and (model_output[0].spec_decode_worker_metrics
@ -1837,6 +1859,7 @@ class LLMEngine:
# Iteration stats
num_prompt_tokens_iter=num_prompt_tokens_iter,
num_generation_tokens_iter=num_generation_tokens_iter,
num_tokens_iter=num_tokens_iter,
time_to_first_tokens_iter=time_to_first_tokens_iter,
time_per_output_tokens_iter=time_per_output_tokens_iter,
spec_decode_metrics=spec_decode_metrics,
@ -1845,12 +1868,18 @@ class LLMEngine:
# Request stats
# Latency
time_e2e_requests=time_e2e_requests,
time_queue_requests=time_queue_requests,
time_inference_requests=time_inference_requests,
time_prefill_requests=time_prefill_requests,
time_decode_requests=time_decode_requests,
time_in_queue_requests=time_in_queue_requests,
model_forward_time_requests=model_forward_time_requests,
model_execute_time_requests=model_execute_time_requests,
# Metadata
num_prompt_tokens_requests=num_prompt_tokens_requests,
num_generation_tokens_requests=num_generation_tokens_requests,
max_num_generation_tokens_requests=
max_num_generation_tokens_requests,
n_requests=n_requests,
max_tokens_requests=max_tokens_requests,
finished_reason_requests=finished_reason_requests,
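
For the new max_num_generation_tokens_requests sample above, each finished sequence group contributes the longest output among its sequences, which matters for requests sampled with n > 1. A tiny illustration (output lengths are made up):

output_lens = [12, 30, 7]  # seq.get_output_len() for each seq in a group with n=3
max_num_generation_tokens = max(output_lens)
assert max_num_generation_tokens == 30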

View File

@ -111,6 +111,15 @@ class Metrics:
name="vllm:generation_tokens_total",
documentation="Number of generation tokens processed.",
labelnames=labelnames)
self.counter_tokens = self._counter_cls(
name="vllm:tokens_total",
documentation="Number of prefill plus generation tokens processed.",
labelnames=labelnames)
self.histogram_iteration_tokens = self._histogram_cls(
name="vllm:iteration_tokens_total",
documentation="Histogram of number of tokens per engine_step.",
labelnames=labelnames,
buckets=[1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192])
self.histogram_time_to_first_token = self._histogram_cls(
name="vllm:time_to_first_token_seconds",
documentation="Histogram of time to first token in seconds.",
@ -130,23 +139,45 @@ class Metrics:
# Request stats
# Latency
request_latency_buckets = [
0.3, 0.5, 0.8, 1.0, 1.5, 2.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0,
40.0, 50.0, 60.0
]
self.histogram_e2e_time_request = self._histogram_cls(
name="vllm:e2e_request_latency_seconds",
documentation="Histogram of end to end request latency in seconds.",
labelnames=labelnames,
buckets=[
0.3, 0.5, 0.8, 1.0, 1.5, 2.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0,
40.0, 50.0, 60.0
])
buckets=request_latency_buckets)
self.histogram_queue_time_request = self._histogram_cls(
name="vllm:request_queue_time_seconds",
documentation=
"Histogram of time spent in WAITING phase for request.",
labelnames=labelnames,
buckets=request_latency_buckets)
self.histogram_inference_time_request = self._histogram_cls(
name="vllm:request_inference_time_seconds",
documentation=
"Histogram of time spent in RUNNING phase for request.",
labelnames=labelnames,
buckets=request_latency_buckets)
self.histogram_prefill_time_request = self._histogram_cls(
name="vllm:request_prefill_time_seconds",
documentation=
"Histogram of time spent in PREFILL phase for request.",
labelnames=labelnames,
buckets=request_latency_buckets)
self.histogram_decode_time_request = self._histogram_cls(
name="vllm:request_decode_time_seconds",
documentation=
"Histogram of time spent in DECODE phase for request.",
labelnames=labelnames,
buckets=request_latency_buckets)
self.histogram_time_in_queue_request = self._histogram_cls(
name="vllm:time_in_queue_requests",
documentation=
"Histogram of time the request spent in the queue in seconds.",
labelnames=labelnames,
buckets=[
0.3, 0.5, 0.8, 1.0, 1.5, 2.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0,
40.0, 50.0, 60.0
])
buckets=request_latency_buckets)
self.histogram_model_forward_time_request = self._histogram_cls(
name="vllm:model_forward_time_milliseconds",
documentation=
@ -173,6 +204,12 @@ class Metrics:
labelnames=labelnames,
buckets=build_1_2_5_buckets(max_model_len),
)
self.histogram_max_num_generation_tokens_request = self._histogram_cls(
name="vllm:request_max_num_generation_tokens",
documentation=
"Histogram of maximum number of requested generation tokens.",
labelnames=labelnames,
buckets=build_1_2_5_buckets(max_model_len))
self.histogram_n_request = self._histogram_cls(
name="vllm:request_params_n",
documentation="Histogram of the n request parameter.",
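
The vllm:request_max_num_generation_tokens histogram reuses build_1_2_5_buckets(max_model_len), the same bucket scheme as the prompt and generation token histograms. A rough sketch of that scheme, assuming it emits a 1-2-5 series capped at max_model_len (this re-implementation is illustrative, not necessarily the exact helper):

from typing import List

def build_1_2_5_buckets(max_value: int) -> List[int]:
    # 1, 2, 5, 10, 20, 50, ... up to and including max_value.
    buckets: List[int] = []
    exponent = 0
    while True:
        for mantissa in (1, 2, 5):
            value = mantissa * 10**exponent
            if value > max_value:
                return buckets
            buckets.append(value)
        exponent += 1

# e.g. for an 8192-token context window:
print(build_1_2_5_buckets(8192))
# [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000]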
@ -526,6 +563,8 @@ class PrometheusStatLogger(StatLoggerBase):
stats.num_prompt_tokens_iter)
self._log_counter(self.metrics.counter_generation_tokens,
stats.num_generation_tokens_iter)
self._log_histogram(self.metrics.histogram_iteration_tokens,
[stats.num_tokens_iter])
self._log_histogram(self.metrics.histogram_time_to_first_token,
stats.time_to_first_tokens_iter)
self._log_histogram(self.metrics.histogram_time_per_output_token,
@ -535,6 +574,14 @@ class PrometheusStatLogger(StatLoggerBase):
# Latency
self._log_histogram(self.metrics.histogram_e2e_time_request,
stats.time_e2e_requests)
self._log_histogram(self.metrics.histogram_queue_time_request,
stats.time_queue_requests)
self._log_histogram(self.metrics.histogram_inference_time_request,
stats.time_inference_requests)
self._log_histogram(self.metrics.histogram_prefill_time_request,
stats.time_prefill_requests)
self._log_histogram(self.metrics.histogram_decode_time_request,
stats.time_decode_requests)
self._log_histogram(self.metrics.histogram_time_in_queue_request,
stats.time_in_queue_requests)
self._log_histogram(self.metrics.histogram_model_forward_time_request,
@ -553,6 +600,9 @@ class PrometheusStatLogger(StatLoggerBase):
self.metrics.histogram_num_generation_tokens_request,
stats.num_generation_tokens_requests)
self._log_histogram(self.metrics.histogram_n_request, stats.n_requests)
self._log_histogram(
self.metrics.histogram_max_num_generation_tokens_request,
stats.max_num_generation_tokens_requests)
self._log_histogram(self.metrics.histogram_max_tokens_request,
stats.max_tokens_requests)
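
These _log_histogram calls fan each collected per-request list out to a labelled Prometheus histogram. A minimal sketch of the pattern with prometheus_client directly (vLLM routes this through self._histogram_cls and its own label handling; the label value and sample data below are illustrative):

from prometheus_client import Histogram

labelnames = ["model_name"]
labels = {"model_name": "example-model"}

histogram_queue_time_request = Histogram(
    "vllm:request_queue_time_seconds",
    "Histogram of time spent in WAITING phase for request.",
    labelnames=labelnames,
    buckets=[0.3, 0.5, 0.8, 1.0, 1.5, 2.0, 2.5, 5.0, 10.0, 15.0,
             20.0, 30.0, 40.0, 50.0, 60.0])

def log_histogram(histogram: Histogram, data: list) -> None:
    # Observe every per-request sample gathered during this logging interval.
    for value in data:
        histogram.labels(**labels).observe(value)

log_histogram(histogram_queue_time_request, [0.12, 0.40, 2.30])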

View File

@ -39,6 +39,7 @@ class Stats:
# Iteration stats (should have _iter suffix)
num_prompt_tokens_iter: int
num_generation_tokens_iter: int
num_tokens_iter: int
time_to_first_tokens_iter: List[float]
time_per_output_tokens_iter: List[float]
num_preemption_iter: int
@ -46,6 +47,10 @@ class Stats:
# Request stats (should have _requests suffix)
# Latency
time_e2e_requests: List[float]
time_queue_requests: List[float]
time_inference_requests: List[float]
time_prefill_requests: List[float]
time_decode_requests: List[float]
time_in_queue_requests: List[float]
model_forward_time_requests: List[float]
model_execute_time_requests: List[float]
@ -53,6 +58,7 @@ class Stats:
num_prompt_tokens_requests: List[int]
num_generation_tokens_requests: List[int]
n_requests: List[int]
max_num_generation_tokens_requests: List[int]
max_tokens_requests: List[int]
finished_reason_requests: List[str]
waiting_lora_adapters: List[str]