[Misc] Add pytest marker to opt-out of global test cleanup (#3863)

Author: Cade Daniel
Date: 2024-04-04 21:54:16 -07:00
Committer: GitHub
parent d03d64fd2e
commit e5043a3e75
3 changed files with 11 additions and 4 deletions


@@ -56,11 +56,15 @@ def cleanup():
 @pytest.fixture()
-def should_do_global_cleanup_after_test() -> bool:
+def should_do_global_cleanup_after_test(request) -> bool:
     """Allow subdirectories to skip global cleanup by overriding this fixture.
     This can provide a ~10x speedup for non-GPU unit tests since they don't need
     to initialize torch.
     """
+    if request.node.get_closest_marker("skip_global_cleanup"):
+        return False
+
     return True
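
For reference, a minimal sketch (not part of this commit) of the two opt-out paths the fixture supports after this change: tagging an individual test with the new marker, or overriding the fixture in a subdirectory's conftest.py, as the docstring describes. The test name below is hypothetical.

    import pytest

    # Per-test opt-out via the marker added in this commit.
    @pytest.mark.skip_global_cleanup
    def test_pure_python_helper():
        assert sum([1, 2, 3]) == 6

    # Per-directory opt-out: override the fixture in that directory's conftest.py.
    @pytest.fixture()
    def should_do_global_cleanup_after_test() -> bool:
        return False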


@@ -7,6 +7,7 @@ from .utils import create_seq_group_metadata_from_prompts, mock_worker
 @pytest.mark.parametrize('num_target_seq_ids', [100])
+@pytest.mark.skip_global_cleanup
 def test_create_target_seq_id_iterator(num_target_seq_ids: int):
     """Verify all new sequence ids are greater than all input
     seq ids.
@@ -27,6 +28,7 @@ def test_create_target_seq_id_iterator(num_target_seq_ids: int):
 @pytest.mark.parametrize('k', [1, 2, 6])
+@pytest.mark.skip_global_cleanup
 def test_get_token_ids_to_score(k: int):
     """Verify correct tokens are selected for scoring.
     """
@@ -53,6 +55,7 @@ def test_get_token_ids_to_score(k: int):
 @pytest.mark.parametrize('k', [1, 2, 6])
+@pytest.mark.skip_global_cleanup
 def test_create_single_target_seq_group_metadata(k: int):
     """Verify correct creation of a batch-expanded seq group metadata.
     """


@@ -487,7 +487,7 @@ def test_empty_input_batch(k: int, batch_size: int):
                          **execute_model_data.to_dict())


-@torch.inference_mode()
+@pytest.mark.skip_global_cleanup
 def test_init_device():
     """Verify SpecDecodeWorker invokes proposer/scorer worker init_device, as
     well as other GPU initialization.
@@ -537,7 +537,7 @@ def test_init_cache_engine():
 @pytest.mark.parametrize('available_cpu_blocks', [500])
 @pytest.mark.parametrize('target_cache_block_size_bytes', [2 * 2 * 4096])
 @pytest.mark.parametrize('draft_kv_size_bytes', [0, 2 * 2 * 768, 2 * 2 * 4096])
-@torch.inference_mode()
+@pytest.mark.skip_global_cleanup
 def test_profile_num_available_blocks(available_gpu_blocks: int,
                                       available_cpu_blocks: int,
                                       target_cache_block_size_bytes: int,
@@ -584,7 +584,7 @@ def test_profile_num_available_blocks(available_gpu_blocks: int,
 @pytest.mark.parametrize('target_cache_block_size_bytes',
                          [2 * 2 * 4096, 2 * 2 * 8192])
 @pytest.mark.parametrize('draft_kv_size_bytes', [0, 2 * 2 * 768, 2 * 2 * 4096])
-@torch.inference_mode()
+@pytest.mark.skip_global_cleanup
 def test_split_num_cache_blocks_evenly(available_gpu_blocks: int,
                                        target_cache_block_size_bytes: int,
                                        draft_kv_size_bytes: int):
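
Not visible in this diff: pytest emits PytestUnknownMarkWarning for marks that have not been declared, so the new skip_global_cleanup marker would normally also be registered somewhere. A minimal sketch of one conventional way to do that in a conftest.py, offered as an assumption rather than something this commit shows:

    def pytest_configure(config):
        # Declare the custom marker so pytest does not warn about an unknown mark.
        config.addinivalue_line(
            "markers",
            "skip_global_cleanup: skip the global torch/GPU cleanup after this test")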