Include tokens from prompt phase in counter_generation_tokens (#2802)
commit 4caf7044e0
parent 6f32cddf1c
@@ -52,6 +52,9 @@ steps:
 - label: LoRA Test
   command: pytest -v -s lora

+- label: Metrics Test
+  command: pytest -v -s metrics
+
 - label: Benchmarks
   working_dir: "/vllm-workspace/.buildkite"
   commands:
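This new Buildkite step runs the metrics test suite on every build via pytest -v -s metrics, mirroring the existing LoRA step, so the two counter tests below are exercised in CI.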
@@ -9,13 +9,16 @@ MODELS = [
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["float"])
 @pytest.mark.parametrize("max_tokens", [128])
-def test_metrics(
+def test_metric_counter_prompt_tokens(
     vllm_runner,
     example_prompts,
     model: str,
     dtype: str,
     max_tokens: int,
 ) -> None:
+    # Reset metric
+    vllm.engine.metrics.counter_prompt_tokens.set_value({}, 0)
+
     vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
     tokenizer = vllm_model.model.get_tokenizer()
     prompt_token_counts = [len(tokenizer.encode(p)) for p in example_prompts]
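Resetting the counter first matters because the metrics in vllm.engine.metrics are process-global singletons, so counts accumulated by earlier tests would otherwise leak into the assertion. A minimal sketch of that reset-and-read pattern, assuming an aioprometheus-style Counter (the set_value/get_value calls above come from its base Collector class); the metric name and values here are illustrative:

    from aioprometheus import Counter

    # Illustrative counter; vllm.engine.metrics defines its own instances.
    counter_tokens = Counter("example_tokens_total", "Tokens processed.")

    counter_tokens.set_value({}, 0)  # reset so earlier runs don't leak counts
    counter_tokens.add({}, 5)        # the engine adds per-iteration counts
    counter_tokens.add({}, 7)
    assert counter_tokens.get_value({}) == 12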
@@ -31,3 +34,32 @@ def test_metrics(
     assert vllm_prompt_token_count == metric_count, (
         f"prompt token count: {vllm_prompt_token_count!r}\nmetric: {metric_count!r}"
     )
+
+
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("dtype", ["float"])
+@pytest.mark.parametrize("max_tokens", [128])
+def test_metric_counter_generation_tokens(
+    vllm_runner,
+    example_prompts,
+    model: str,
+    dtype: str,
+    max_tokens: int,
+) -> None:
+    # Reset metric
+    vllm.engine.metrics.counter_generation_tokens.set_value({}, 0)
+
+    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
+    tokenizer = vllm_model.model.get_tokenizer()
+    metric_count = vllm.engine.metrics.counter_generation_tokens.get_value({})
+    vllm_generation_count = 0
+    for i in range(len(example_prompts)):
+        vllm_output_ids, vllm_output_str = vllm_outputs[i]
+        prompt_ids = tokenizer.encode(example_prompts[i])
+        # vllm_output_ids contains both prompt tokens and generation tokens. We're interested only in the count of the generation tokens.
+        vllm_generation_count += len(vllm_output_ids) - len(prompt_ids)
+
+    assert vllm_generation_count == metric_count, (
+        f"generation token count: {vllm_generation_count!r}\nmetric: {metric_count!r}"
+    )
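The loop's per-prompt arithmetic relies on generate_greedy returning the full token sequence, prompt plus completion, so subtracting the encoded prompt length leaves only the generated tokens. A worked instance with made-up sizes:

    # Hypothetical sizes: a 6-token prompt and a 134-token output sequence.
    prompt_ids = list(range(6))
    vllm_output_ids = list(range(134))

    generated = len(vllm_output_ids) - len(prompt_ids)
    assert generated == 128  # equals max_tokens when generation is not cut short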
@@ -872,6 +872,9 @@ class LLMEngine:
             num_prompt_tokens = sum(
                 len(seq_group.prompt_token_ids)
                 for seq_group in scheduler_outputs.scheduled_seq_groups)
+            num_generation_tokens = sum(
+                seq_group.num_seqs()
+                for seq_group in scheduler_outputs.scheduled_seq_groups)
         else:
             num_generation_tokens = scheduler_outputs.num_batched_tokens

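This branch is the heart of the fix: during a prompt (prefill) iteration, num_batched_tokens counts prompt tokens, yet every scheduled sequence still produces its first generation token in that same step, and those tokens were previously never added to counter_generation_tokens. A self-contained sketch of the accounting, using a hypothetical SeqGroup stand-in for vLLM's real sequence-group class:

    class SeqGroup:
        """Stand-in exposing just what the two sums below need."""

        def __init__(self, prompt_token_ids, n_seqs):
            self.prompt_token_ids = prompt_token_ids
            self._n_seqs = n_seqs

        def num_seqs(self):
            return self._n_seqs


    # A prefill step over two groups: a 5-token prompt with one sequence and
    # a 7-token prompt sampled with two parallel sequences.
    scheduled_seq_groups = [SeqGroup([0] * 5, 1), SeqGroup([0] * 7, 2)]

    num_prompt_tokens = sum(
        len(g.prompt_token_ids) for g in scheduled_seq_groups)
    num_generation_tokens = sum(g.num_seqs() for g in scheduled_seq_groups)

    assert num_prompt_tokens == 12     # 5 + 7 prompt tokens are prefilled
    assert num_generation_tokens == 3  # each sequence emits its first token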