# SPDX-License-Identifier: Apache-2.0
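"""Shared helpers for the speculative-decoding end-to-end tests: prompt
fixtures, output collection, and equality/logprob correctness checks."""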
from collections.abc import Sequence
from itertools import cycle
from typing import Optional, Union

import pytest
import torch

from vllm import LLM, SamplingParams
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.model_executor.utils import set_random_seed
from vllm.sequence import PromptLogprobs, SampleLogprobs

from ...models.utils import (TokensTextLogprobs,
                             TokensTextLogprobsPromptLogprobs,
                             check_logprobs_close, check_outputs_equal)
from ...utils import RemoteOpenAIServer

PROMPTS = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
    "San Francisco is known for its",
    "Facebook was created in 2004 by",
    "Curious George is a",
    "Python 3.11 brings improvements to its",
]


@pytest.fixture
def test_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs,
                       test_llm_kwargs, seed):
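    """Return a generator function that builds an ``LLM`` from the merged
    kwargs, seeds it when a seed is provided, yields it to the caller, and
    finally frees the engine and the distributed environment.
    """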
    def generate():
        kwargs = {
            **common_llm_kwargs,
            **per_test_common_llm_kwargs,
            **test_llm_kwargs,
        }

        llm = LLM(**kwargs)

        if seed is not None:
            set_random_seed(seed)

        yield llm

        del llm
        cleanup_dist_env_and_memory()

    return generate


def maybe_assert_ngram_worker(llm):
    # Verify that the proposer worker is an NGramWorker when ngram
    # speculation is specified.
    if (llm.llm_engine.speculative_config is not None
            and llm.llm_engine.speculative_config.method == "ngram"):
        from vllm.spec_decode.ngram_worker import NGramWorker
        assert isinstance(
            llm.llm_engine.model_executor.driver_worker.proposer_worker,
            NGramWorker)


def get_output_from_llm_generator(
        llm_generator, prompts,
        sampling_params) -> tuple[list[str], list[list[int]], float]:
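    """Generate completions for ``prompts`` with each LLM yielded by
    ``llm_generator`` and return the output texts, token ids, and the
    draft acceptance rate (-1.0 if acceptance-rate logging is unavailable).
    """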
    tokens: list[str] = []
    token_ids: list[list[int]] = []
    acceptance_rate: float = -1.0
    for llm in llm_generator():
        maybe_assert_ngram_worker(llm)

        outputs = llm.generate(prompts, sampling_params, use_tqdm=True)

        token_ids = [output.outputs[0].token_ids for output in outputs]
        tokens = [output.outputs[0].text for output in outputs]

        # Fetch the draft acceptance rate if metrics logging is enabled.
        if stat_loggers := getattr(llm.llm_engine, "stat_loggers", None):
            stat_logger = stat_loggers["prometheus"]
            acceptance_rate = (stat_logger.metrics.
                               gauge_spec_decode_draft_acceptance_rate.labels(
                                   **stat_logger.labels)._value.get())
        del llm

    return tokens, token_ids, acceptance_rate


def check_logprobs_correctness(
        spec_outputs: Sequence[Union[TokensTextLogprobs,
                                     TokensTextLogprobsPromptLogprobs]],
        baseline_outputs: Sequence[Union[TokensTextLogprobs,
                                         TokensTextLogprobsPromptLogprobs]],
        disable_logprobs: bool = False,
):
    """Compare sampled and prompt logprobs between the baseline and the
    speculative-decoding outputs. With logprobs enabled, assert that the
    two sets are numerically close; with logprobs disabled, only dummy
    entries are returned, so check token ids and dummy values instead.
    """
    if not disable_logprobs:
        return check_logprobs_close(
            outputs_0_lst=baseline_outputs,
            outputs_1_lst=spec_outputs,
            name_0="org",
            name_1="sd",
        )

    # Check correctness when disable_logprobs == True
    for spec_output, baseline_output in zip(spec_outputs, baseline_outputs):
        # Check generated token logprobs.
        spec_logprobs = spec_output[2]
        baseline_logprobs = baseline_output[2]
        _check_logprobs_when_output_disabled(spec_logprobs,
                                             baseline_logprobs,
                                             is_prompt_logprobs=False)

        # Check prompt logprobs too, if they exist
        if len(baseline_output) == 4:
            assert len(spec_output) == 4
            spec_prompt_logprobs = spec_output[3]
            baseline_prompt_logprobs = baseline_output[3]
            _check_logprobs_when_output_disabled(spec_prompt_logprobs,
                                                 baseline_prompt_logprobs,
                                                 is_prompt_logprobs=True)


def _check_logprobs_when_output_disabled(
        spec_logprobs: Union[Optional[PromptLogprobs], SampleLogprobs],
        baseline_logprobs: Union[Optional[PromptLogprobs], SampleLogprobs],
        is_prompt_logprobs: bool = False,
):
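    """Check the logprobs of one sequence when logprob output is disabled:
    each position should carry exactly one dummy entry (rank -1, logprob
    0.0) whose token id also appears in the baseline logprobs.
    """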
    # Prompt logprobs are optional.
    if is_prompt_logprobs and baseline_logprobs is None:
        assert spec_logprobs is None
        return

    assert spec_logprobs is not None
    assert baseline_logprobs is not None
    assert len(spec_logprobs) == len(baseline_logprobs)

    # For each generated position of the sequence.
    for pos, (spec_pos_logprobs, baseline_pos_logprobs) in enumerate(
            zip(spec_logprobs, baseline_logprobs)):

        # The first prompt logprob is expected to be None.
        if is_prompt_logprobs and baseline_pos_logprobs is None:
            assert spec_pos_logprobs is None
            assert pos == 0
            continue

        assert spec_pos_logprobs is not None
        assert baseline_pos_logprobs is not None

        # When disabled, a single logprob is returned with dummy values for
        # the score and rank, but the token id should match the baseline.
        assert len(spec_pos_logprobs) == 1
        (spec_pos_logprob_token_id,
         spec_pos_logprob) = next(iter(spec_pos_logprobs.items()))
        assert spec_pos_logprob.rank == -1
        assert spec_pos_logprob.logprob == 0.0
        if isinstance(spec_pos_logprob_token_id, torch.Tensor):
            spec_pos_logprob_token_id = spec_pos_logprob_token_id.item()
        assert spec_pos_logprob_token_id in baseline_pos_logprobs


def run_equality_correctness_test(
        vllm_runner,
        common_llm_kwargs,
        per_test_common_llm_kwargs,
        baseline_llm_kwargs,
        test_llm_kwargs,
        batch_size: int,
        max_output_len: int,
        seed: Optional[int] = 0,
        temperature: float = 0.0,
        disable_seed: bool = False,
        ignore_eos: bool = True,
        ensure_all_accepted: bool = False,
        expected_acceptance_rate: Optional[float] = None,
        logprobs: Optional[int] = None,
        prompt_logprobs: Optional[int] = None,
        disable_logprobs: bool = False):
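    """Run the same prompts through a baseline LLM and a speculative-decoding
    LLM and assert that the generated token ids and texts match exactly.
    Optionally also verify logprobs and the draft token acceptance rate.
    """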
    org_args = {
        **common_llm_kwargs,
        **per_test_common_llm_kwargs,
        **baseline_llm_kwargs,
    }

    sd_args = {
        **common_llm_kwargs,
        **per_test_common_llm_kwargs,
        **test_llm_kwargs,
    }

    prompts = [prompt for prompt, _ in zip(cycle(PROMPTS), range(batch_size))]

    if disable_seed:
        seed = None

    sampling_params = SamplingParams(temperature=temperature,
                                     max_tokens=max_output_len,
                                     seed=seed,
                                     ignore_eos=ignore_eos,
                                     logprobs=logprobs,
                                     prompt_logprobs=prompt_logprobs)

    with vllm_runner(**org_args) as vllm_model:
        org_outputs = vllm_model.generate_w_logprobs(prompts, sampling_params)

    with vllm_runner(**sd_args) as vllm_model:
        if ensure_all_accepted or expected_acceptance_rate is not None:
            # Force the log interval negative so every metrics snapshot is
            # captured and no acceptance stats are missed.
            stat_logger = vllm_model.model.llm_engine.stat_loggers[
                'prometheus']
            stat_logger.local_interval = -100

        sd_outputs = vllm_model.generate_w_logprobs(prompts, sampling_params)

        if ensure_all_accepted or expected_acceptance_rate is not None:
            acceptance_rate = (stat_logger.metrics.
                               gauge_spec_decode_draft_acceptance_rate.labels(
                                   **stat_logger.labels)._value.get())

            if ensure_all_accepted:
                assert True
                # FIXME: ci fails to log acceptance rate.
                # It works locally.
                # assert acceptance_rate == 1.0

            if expected_acceptance_rate is not None:
                assert acceptance_rate >= expected_acceptance_rate - 1e-2

    # Only pass token entries, not the logprobs.
    check_outputs_equal(outputs_0_lst=[out[0:2] for out in org_outputs],
                        outputs_1_lst=[out[0:2] for out in sd_outputs],
                        name_0="org",
                        name_1="sd")

    # Check logprobs if requested.
    if logprobs is not None or prompt_logprobs is not None:
        check_logprobs_correctness(sd_outputs, org_outputs, disable_logprobs)


def run_equality_correctness_test_tp(model,
                                     common_llm_kwargs,
                                     per_test_common_llm_kwargs,
                                     baseline_llm_kwargs,
                                     test_llm_kwargs,
                                     batch_size: int,
                                     max_output_len: int,
                                     seed: int = 0,
                                     temperature: float = 0.0,
                                     logprobs: Optional[int] = None):
    """Helper method that compares the outputs of both the baseline LLM and
    the test LLM. It asserts greedy equality, i.e. that the outputs are
    exactly the same when temperature is zero.
    """
    arg1 = common_llm_kwargs + per_test_common_llm_kwargs + baseline_llm_kwargs
    arg2 = common_llm_kwargs + per_test_common_llm_kwargs + test_llm_kwargs
    env1 = env2 = None

    max_wait_seconds = 240
    results = []

    prompts = [prompt for prompt, _ in zip(cycle(PROMPTS), range(batch_size))]
    for args, env in ((arg1, env1), (arg2, env2)):
        with RemoteOpenAIServer(model,
                                args,
                                env_dict=env,
                                max_wait_seconds=max_wait_seconds) as server:
            client = server.get_client()

            completion = client.completions.create(model=model,
                                                   prompt=prompts,
                                                   max_tokens=max_output_len,
                                                   seed=seed,
                                                   temperature=temperature,
                                                   logprobs=logprobs)

            results.append({
                "test":
                "seeded_sampling",
                "text": [choice.text for choice in completion.choices],
                "logprobs": [choice.logprobs for choice in completion.choices],
                "finish_reason":
                [choice.finish_reason for choice in completion.choices],
                "usage":
                completion.usage,
            })

    n = len(results) // 2
    arg1_results = results[:n]
    arg2_results = results[n:]
    # Separate out the logprobs so that exact equality is only asserted on
    # the token-level fields.
    arg1_logprobs = [r.pop("logprobs") for r in arg1_results]
    arg2_logprobs = [r.pop("logprobs") for r in arg2_results]

    for arg1_result, arg2_result in zip(arg1_results, arg2_results):
        assert arg1_result == arg2_result, (
            f"Results for {model=} are not the same with {arg1=} and {arg2=}. "
            f"{arg1_result=} != {arg2_result=}")
    if logprobs:
        for logs1, logs2 in zip(arg1_logprobs, arg2_logprobs):
            for l1, l2 in zip(logs1, logs2):
                assert l1.tokens == l2.tokens