[Bugfix] Fix speculative decode seeded test (#6743)
parent 545146349c
commit 2cf0df3381
@@ -191,7 +191,8 @@ def create_llm_generator(baseline_or_test, request, common_llm_kwargs,
                 and llm.llm_engine.log_stats):
             for sate_logger in llm.llm_engine.stat_loggers.values():
                 sate_logger.local_interval = 0
-        set_random_seed(seed)
+        if seed is not None:
+            set_random_seed(seed)
 
         yield llm
         del llm
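The hunk above guards the fixture's global seeding: when the parametrized seed is None, the engine's global RNG state is left untouched, so determinism must come from per-request seeds alone. A minimal sketch of the same guard pattern (set_random_seed here is a stdlib stand-in for vLLM's seeding helper, and build_generator is a hypothetical fixture, not the repo's code):

    import random
    from typing import Optional

    def set_random_seed(seed: int) -> None:
        # Stand-in for vLLM's helper: seeds the process-wide RNG.
        random.seed(seed)

    def build_generator(seed: Optional[int]) -> None:
        # seed=None now means "leave global RNG state alone"; only an
        # explicit seed touches the global generators.
        if seed is not None:
            set_random_seed(seed)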
@@ -21,7 +21,8 @@ from .conftest import run_equality_correctness_test
         "num_speculative_tokens": 3,
     }])
 @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
-@pytest.mark.parametrize("baseline_llm_kwargs", [{}])
+@pytest.mark.parametrize("baseline_llm_kwargs", [{"seed": 1}])
+@pytest.mark.parametrize("test_llm_kwargs", [{"seed": 5}])
 @pytest.mark.parametrize("batch_size", [1, 8, 32])
 @pytest.mark.parametrize("temperature", [0.1, 1.0])
 @pytest.mark.parametrize(
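Giving the baseline and test engines deliberately different global seeds (1 vs. 5) is what makes the test meaningful: if their outputs still match, the agreement can only come from per-request seeds, not from shared global RNG state. A runnable stdlib illustration of that reasoning (plain random.Random streams standing in for the two engines):

    import random

    # Different global seeds => the two streams diverge immediately.
    baseline_rng, test_rng = random.Random(1), random.Random(5)
    assert ([baseline_rng.random() for _ in range(3)]
            != [test_rng.random() for _ in range(3)])

    # An explicit per-request seed overrides the stream, so both agree.
    assert random.Random(42).random() == random.Random(42).random()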
@@ -30,15 +31,26 @@ from .conftest import run_equality_correctness_test
         # Use smaller output len for fast test.
         10,
     ])
-@pytest.mark.parametrize("seed", [1])
-def test_seeded_consistency(baseline_llm_generator, batch_size: int,
-                            temperature: float, output_len: int):
+@pytest.mark.parametrize("seed", [None])
+def test_seeded_consistency(baseline_llm_generator, test_llm_generator,
+                            batch_size: int, temperature: float,
+                            output_len: int):
     """Verify outputs are consistent across multiple runs with same seed
     """
     run_equality_correctness_test(baseline_llm_generator,
-                                  baseline_llm_generator,
+                                  test_llm_generator,
                                   batch_size,
                                   max_output_len=output_len,
                                   temperature=temperature,
                                   seeded=True,
                                   force_output_len=True)
+
+    # Ensure this same test does fail if we _don't_ include per-request seeds
+    with pytest.raises(AssertionError):
+        run_equality_correctness_test(baseline_llm_generator,
+                                      test_llm_generator,
+                                      batch_size,
+                                      max_output_len=output_len,
+                                      temperature=temperature,
+                                      seeded=False,
+                                      force_output_len=True)
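The rewritten test now compares the differently seeded baseline and test generators with seeded=True, then asserts that the same comparison fails once per-request seeds are dropped. For reference, a sketch of how per-request seeding looks through vLLM's public API, assuming a vLLM version where SamplingParams accepts a seed argument (model name and prompt are placeholders):

    from vllm import LLM, SamplingParams

    llm = LLM(model="facebook/opt-125m")
    params = SamplingParams(temperature=1.0, max_tokens=10, seed=123)

    # The same per-request seed should yield identical sampled text
    # across runs, even at high temperature.
    first = llm.generate(["The future of AI is"], params)
    second = llm.generate(["The future of AI is"], params)
    assert first[0].outputs[0].text == second[0].outputs[0].text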