import json
import os
import sys
import tempfile
from collections import UserList
from enum import Enum
from typing import (Any, Callable, Dict, List, Optional, Tuple, Type,
                    TypedDict, TypeVar, Union)

import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from huggingface_hub import snapshot_download
from PIL import Image
from transformers import (AutoModelForCausalLM, AutoTokenizer, BatchEncoding,
                          BatchFeature)
from transformers.models.auto.auto_factory import _BaseAutoModelClass

from tests.models.utils import (TokensTextLogprobs,
                                TokensTextLogprobsPromptLogprobs)
from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset
from vllm.assets.video import VideoAsset
from vllm.config import TaskOption, TokenizerPoolConfig
from vllm.connections import global_http_connection
from vllm.distributed import (cleanup_dist_env_and_memory,
                              init_distributed_environment,
                              initialize_model_parallel)
from vllm.inputs import (ExplicitEncoderDecoderPrompt, TextPrompt,
                         to_enc_dec_tuple_list, zip_enc_dec_prompts)
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.platforms import current_platform
from vllm.sampling_params import BeamSearchParams
from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, cuda_device_count_stateless,
                        identity)

logger = init_logger(__name__)

_TEST_DIR = os.path.dirname(__file__)
_TEST_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "example.txt")]
_LONG_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "summary.txt")]

_M = TypeVar("_M")
_PromptMultiModalInput = Union[List[_M], List[List[_M]]]

PromptImageInput = _PromptMultiModalInput[Image.Image]
PromptAudioInput = _PromptMultiModalInput[Tuple[np.ndarray, int]]
PromptVideoInput = _PromptMultiModalInput[np.ndarray]


def _read_prompts(filename: str) -> List[str]:
    with open(filename, "r") as f:
        prompts = f.readlines()
        return prompts


class _ImageAssetPrompts(TypedDict):
    stop_sign: str
    cherry_blossom: str


if sys.version_info < (3, 9):
    # UserList cannot be subscripted
    class _ImageAssetsBase(UserList):
        pass
else:

    class _ImageAssetsBase(UserList[ImageAsset]):
        pass


class _ImageAssets(_ImageAssetsBase):

    def __init__(self) -> None:
        super().__init__([
            ImageAsset("stop_sign"),
            ImageAsset("cherry_blossom"),
        ])

    def prompts(self, prompts: _ImageAssetPrompts) -> List[str]:
        """
        Convenience method to define the prompt for each test image.

        The order of the returned prompts matches the order of the
        assets when iterating through this object.
        """
        return [prompts["stop_sign"], prompts["cherry_blossom"]]


class _VideoAssetPrompts(TypedDict):
    sample_demo_1: str


if sys.version_info < (3, 9):
    # UserList cannot be subscripted
    class _VideoAssetsBase(UserList):
        pass
else:

    class _VideoAssetsBase(UserList[VideoAsset]):
        pass


class _VideoAssets(_VideoAssetsBase):

    def __init__(self) -> None:
        super().__init__([
            VideoAsset("sample_demo_1.mp4"),
        ])

    def prompts(self, prompts: _VideoAssetPrompts) -> List[str]:
        return [prompts["sample_demo_1"]]


IMAGE_ASSETS = _ImageAssets()
"""Singleton instance of :class:`_ImageAssets`."""
VIDEO_ASSETS = _VideoAssets()
"""Singleton instance of :class:`_VideoAssets`."""
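
# The two singletons above are exposed to tests through the ``image_assets``
# and ``video_assets`` fixtures defined later in this file. A minimal sketch
# of how a multimodal test might consume them (illustrative only; the prompt
# strings, model name, and token budget below are assumptions, not part of
# this module):
#
#     def test_stop_sign_caption(vllm_runner, image_assets):
#         prompts = image_assets.prompts({
#             "stop_sign": "USER: <image>\nWhat is shown?\nASSISTANT:",
#             "cherry_blossom": "USER: <image>\nWhat season is it?\nASSISTANT:",
#         })
#         images = [asset.pil_image for asset in image_assets]
#         with vllm_runner("llava-hf/llava-1.5-7b-hf") as vllm_model:
#             outputs = vllm_model.generate_greedy(prompts, 32, images=images)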


@pytest.fixture(autouse=True)
def init_test_http_connection():
    # pytest_asyncio may use a different event loop per test
    # so we need to make sure the async client is created anew
    global_http_connection.reuse_client = False


@pytest.fixture
def dist_init():
    temp_file = tempfile.mkstemp()[1]
    init_distributed_environment(
        world_size=1,
        rank=0,
        distributed_init_method=f"file://{temp_file}",
        local_rank=0,
        backend="nccl",
    )
    initialize_model_parallel(1, 1)
    yield
    cleanup_dist_env_and_memory()


@pytest.fixture()
def should_do_global_cleanup_after_test(request) -> bool:
    """Allow subdirectories to skip global cleanup by overriding this fixture.

    This can provide a ~10x speedup for non-GPU unit tests since they don't
    need to initialize torch.
    """
    return not request.node.get_closest_marker("skip_global_cleanup")


@pytest.fixture(autouse=True)
def cleanup_fixture(should_do_global_cleanup_after_test: bool):
    yield
    if should_do_global_cleanup_after_test:
        cleanup_dist_env_and_memory()


@pytest.fixture(autouse=True)
def dynamo_reset():
    yield
    torch._dynamo.reset()


@pytest.fixture
def example_prompts() -> List[str]:
    prompts = []
    for filename in _TEST_PROMPTS:
        prompts += _read_prompts(filename)
    return prompts


class DecoderPromptType(Enum):
    """For encoder/decoder models only."""
    CUSTOM = 1
    NONE = 2
    EMPTY_STR = 3


@pytest.fixture
def example_encoder_decoder_prompts(
) -> Dict[DecoderPromptType, List[ExplicitEncoderDecoderPrompt]]:
    '''
    Returns an encoder prompt list and a decoder prompt list, wherein each
    pair of same-index entries in both lists corresponds to an
    (encoder prompt, decoder prompt) tuple.

    Returns:

    * Encoder prompt list
    * Decoder prompt list (reverse of encoder prompt list)
    '''

    encoder_prompts = []
    for filename in _TEST_PROMPTS:
        encoder_prompts += _read_prompts(filename)

    custom_decoder_prompts = encoder_prompts[::-1]
    empty_str_decoder_prompts = [""] * len(encoder_prompts)
    none_decoder_prompts = [None] * len(encoder_prompts)

    # NONE decoder prompt type
    return {
        DecoderPromptType.NONE:
        zip_enc_dec_prompts(encoder_prompts, none_decoder_prompts),
        DecoderPromptType.EMPTY_STR:
        zip_enc_dec_prompts(encoder_prompts, empty_str_decoder_prompts),
        DecoderPromptType.CUSTOM:
        zip_enc_dec_prompts(encoder_prompts, custom_decoder_prompts),
    }
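
# A hedged sketch of the structure the fixture above returns (the prompt
# strings are illustrative assumptions, not the contents of the prompt files):
# each entry maps a DecoderPromptType to a list of ExplicitEncoderDecoderPrompt
# dicts produced by zip_enc_dec_prompts, e.g.
#
#     {
#         DecoderPromptType.CUSTOM: [
#             {"encoder_prompt": "Hello, my name is",
#              "decoder_prompt": "The capital of France is"},
#             ...
#         ],
#         DecoderPromptType.NONE: [...],
#         DecoderPromptType.EMPTY_STR: [...],
#     }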


@pytest.fixture
def example_long_prompts() -> List[str]:
    prompts = []
    for filename in _LONG_PROMPTS:
        prompts += _read_prompts(filename)
    return prompts


@pytest.fixture(scope="session")
def image_assets() -> _ImageAssets:
    return IMAGE_ASSETS


@pytest.fixture(scope="session")
def video_assets() -> _VideoAssets:
    return VIDEO_ASSETS


_T = TypeVar("_T", nn.Module, torch.Tensor, BatchEncoding, BatchFeature, dict)


class HfRunner:

    def wrap_device(self, x: _T, device: Optional[str] = None) -> _T:
        if device is None:
            device = "cpu" if current_platform.is_cpu() else "cuda"

        if isinstance(x, dict):
            return {k: self.wrap_device(v, device) for k, v in x.items()}

        if hasattr(x, "device") and x.device.type == device:
            return x

        return x.to(device)

    def __init__(
        self,
        model_name: str,
        dtype: str = "half",
        *,
        model_kwargs: Optional[Dict[str, Any]] = None,
        is_embedding_model: bool = False,
        is_sentence_transformer: bool = False,
        skip_tokenizer_init: bool = False,
        auto_cls: Type[_BaseAutoModelClass] = AutoModelForCausalLM,
        postprocess_inputs: Callable[..., BatchEncoding] = identity,
    ) -> None:
        torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[dtype]

        self.model_name = model_name

        if is_sentence_transformer:
            # Lazy init required for AMD CI
            from sentence_transformers import SentenceTransformer
            self.model = self.wrap_device(
                SentenceTransformer(
                    model_name,
                    device="cpu",
                    trust_remote_code=True,
                ).to(dtype=torch_dtype))
        else:
            model_kwargs = model_kwargs if model_kwargs is not None else {}
            self.model = self.wrap_device(
                auto_cls.from_pretrained(
                    model_name,
                    torch_dtype=torch_dtype,
                    trust_remote_code=True,
                    **model_kwargs,
                ))

        if not skip_tokenizer_init:
            self.tokenizer = AutoTokenizer.from_pretrained(
                model_name,
                torch_dtype=torch_dtype,
                trust_remote_code=True,
            )

        # don't put this import at the top level
        # it will call torch.cuda.device_count()
        from transformers import AutoProcessor  # noqa: F401
        self.processor = AutoProcessor.from_pretrained(
            model_name,
            torch_dtype=torch_dtype,
            trust_remote_code=True,
        )
        if skip_tokenizer_init:
            self.tokenizer = self.processor.tokenizer

        self.dtype = dtype
        self.postprocess_inputs = postprocess_inputs

    def get_inputs(
        self,
        prompts: List[str],
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
    ) -> List[BatchEncoding]:
        if images is not None:
            assert len(prompts) == len(images)

        if videos is not None:
            assert len(prompts) == len(videos)

        if audios is not None:
            assert len(prompts) == len(audios)

        all_inputs: List[BatchEncoding] = []
        for i, prompt in enumerate(prompts):
            processor_kwargs: Dict[str, Any] = {
                "text": prompt,
                "return_tensors": "pt",
            }
            if images is not None and (image := images[i]) is not None:
                processor_kwargs["images"] = image
            if videos is not None and (video := videos[i]) is not None:
                processor_kwargs["videos"] = video
            if audios is not None and (audio_tuple := audios[i]) is not None:
                audio, sr = audio_tuple
                processor_kwargs["audio"] = audio
                processor_kwargs["sampling_rate"] = sr

            inputs = self.processor(**processor_kwargs)
            inputs = self.postprocess_inputs(inputs, dtype=self.dtype)

            all_inputs.append(inputs)

        return all_inputs

    def classify(self, prompts: List[str]) -> List[List[float]]:
        # output is the softmax-normalized final logits
        all_inputs = self.get_inputs(prompts)
        outputs = []
        for inputs in all_inputs:
            output = self.model(**self.wrap_device(inputs))
            logits = output.logits.softmax(dim=-1)[0].tolist()
            outputs.append(logits)

        return outputs

    def generate(
        self,
        prompts: List[str],
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        all_inputs = self.get_inputs(prompts,
                                     images=images,
                                     videos=videos,
                                     audios=audios)

        outputs: List[Tuple[List[List[int]], List[str]]] = []
        for inputs in all_inputs:
            output_ids = self.model.generate(
                **self.wrap_device(inputs, device=self.model.device.type),
                use_cache=True,
                **kwargs,
            )
            output_str = self.processor.batch_decode(
                output_ids,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=False,
            )
            output_ids = output_ids.cpu().tolist()
            outputs.append((output_ids, output_str))
        return outputs

    def generate_greedy(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[int], str]]:
        outputs = self.generate(prompts,
                                do_sample=False,
                                max_new_tokens=max_tokens,
                                images=images,
                                videos=videos,
                                audios=audios,
                                **kwargs)

        return [(output_ids[0], output_str[0])
                for output_ids, output_str in outputs]

    def generate_beam_search(
        self,
        prompts: List[str],
        beam_width: int,
        max_tokens: int,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        outputs = self.generate(prompts,
                                do_sample=False,
                                max_new_tokens=max_tokens,
                                num_beams=beam_width,
                                num_return_sequences=beam_width)
        for i in range(len(outputs)):
            output_ids, output_str = outputs[i]
            for j in range(len(output_ids)):
                output_ids[j] = [
                    x for x in output_ids[j]
                    if x != self.tokenizer.pad_token_id
                ]
            outputs[i] = (output_ids, output_str)
        return outputs

    def generate_greedy_logprobs(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
        **kwargs: Any,
    ) -> List[List[torch.Tensor]]:
        all_inputs = self.get_inputs(prompts,
                                     images=images,
                                     videos=videos,
                                     audios=audios)

        all_logprobs: List[List[torch.Tensor]] = []
        for inputs in all_inputs:
            output = self.model.generate(
                **self.wrap_device(inputs, device=self.model.device.type),
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )
            seq_logprobs = self._hidden_states_to_seq_logprobs(
                output.hidden_states)
            all_logprobs.append(seq_logprobs)
        return all_logprobs

    def _hidden_states_to_seq_logprobs(
        self,
        hidden_states: Tuple[Tuple[torch.Tensor, ...], ...],
    ) -> List[torch.Tensor]:
        output_embeddings = self.model.get_output_embeddings()

        seq_logprobs: List[torch.Tensor] = []
        for _, hidden_state in enumerate(hidden_states):
            last_hidden_states = hidden_state[-1][0]
            logits = torch.matmul(
                last_hidden_states.to(output_embeddings.weight.device),
                output_embeddings.weight.t(),
            )
            if getattr(output_embeddings, "bias", None) is not None:
                logits += output_embeddings.bias.unsqueeze(0)
            logprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
            seq_logprobs.append(logprobs)

        return seq_logprobs

    def _hidden_states_to_logprobs(
        self,
        hidden_states: Tuple[Tuple[torch.Tensor, ...], ...],
        num_logprobs: int,
    ) -> Tuple[List[Dict[int, float]], int]:
        seq_logprobs = self._hidden_states_to_seq_logprobs(hidden_states)
        output_len = len(hidden_states)

        # convert to dict
        seq_logprobs_lst: List[Dict[int, float]] = []
        for tok_idx, tok_logprobs in enumerate(seq_logprobs):
            # drop prompt logprobs
            if tok_idx == 0:
                tok_logprobs = tok_logprobs[-1, :].reshape(1, -1)
            topk = tok_logprobs.topk(num_logprobs)

            tok_logprobs_dct = {}
            for token_id, logprob in zip(topk.indices[0], topk.values[0]):
                tok_logprobs_dct[token_id.item()] = logprob.item()

            seq_logprobs_lst.append(tok_logprobs_dct)

        return (
            seq_logprobs_lst,
            output_len,
        )

    def generate_greedy_logprobs_limit(
        self,
        prompts: List[str],
        max_tokens: int,
        num_logprobs: int,
        images: Optional[PromptImageInput] = None,
        audios: Optional[PromptAudioInput] = None,
        videos: Optional[PromptVideoInput] = None,
        **kwargs: Any,
    ) -> List[TokensTextLogprobs]:
        all_inputs = self.get_inputs(prompts,
                                     images=images,
                                     videos=videos,
                                     audios=audios)

        all_logprobs: List[List[Dict[int, float]]] = []
        all_output_ids: List[List[int]] = []
        all_output_strs: List[str] = []

        for inputs in all_inputs:
            output = self.model.generate(
                **self.wrap_device(inputs, device=self.model.device.type),
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )

            (
                seq_logprobs_lst,
                output_len,
            ) = self._hidden_states_to_logprobs(output.hidden_states,
                                                num_logprobs)

            all_logprobs.append(seq_logprobs_lst)
            seq_ids = output.sequences[0]
            output_len = len(seq_logprobs_lst)
            output_ids = seq_ids[-output_len:]
            all_output_ids.append(output_ids.tolist())
            all_output_strs.append(self.tokenizer.decode(output_ids))

        outputs = zip(all_output_ids, all_output_strs, all_logprobs)
        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def generate_encoder_decoder_greedy_logprobs_limit(
        self,
        encoder_decoder_prompts: List[ExplicitEncoderDecoderPrompt[str, str]],
        max_tokens: int,
        num_logprobs: int,
        images: Optional[PromptImageInput] = None,
        **kwargs: Any,
    ) -> List[TokensTextLogprobs]:
        '''
        Greedy logprobs generation for vLLM encoder/decoder models
        '''

        all_logprobs: List[List[Dict[int, float]]] = []
        all_output_ids: List[List[int]] = []
        all_output_strs: List[str] = []

        for i, (encoder_prompt, decoder_prompt) in enumerate(
                to_enc_dec_tuple_list(encoder_decoder_prompts)):
            processor_kwargs: Dict[str, Any] = {
                "text": encoder_prompt,
                "return_tensors": "pt",
            }
            if images is not None and images[i] is not None:
                processor_kwargs["images"] = images[i]

            encoder_input_ids = self.wrap_device(
                self.processor(**processor_kwargs).input_ids,
                device=self.model.device.type,
            )

            if decoder_prompt is None:
                decoder_input_ids = None
            else:
                decoder_input_ids = self.wrap_device(
                    self.tokenizer(decoder_prompt,
                                   return_tensors="pt").input_ids,
                    device=self.model.device.type,
                )

            output = self.model.generate(
                encoder_input_ids,
                decoder_input_ids=decoder_input_ids,
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )

            (
                seq_logprobs_lst,
                output_len,
            ) = self._hidden_states_to_logprobs(output.decoder_hidden_states,
                                                num_logprobs)

            all_logprobs.append(seq_logprobs_lst)
            seq_ids = output.sequences[0]
            output_ids = seq_ids[-output_len:]
            all_output_ids.append(output_ids.tolist())
            all_output_strs.append(self.tokenizer.decode(output_ids))

        outputs = zip(all_output_ids, all_output_strs, all_logprobs)
        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def encode(self, prompts: List[str]) -> List[List[torch.Tensor]]:
        return self.model.encode(prompts)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
        cleanup_dist_env_and_memory()


@pytest.fixture(scope="session")
def hf_runner():
    return HfRunner
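
# A minimal usage sketch for the ``hf_runner`` fixture above (illustrative
# only; the model name and token budget are assumptions, not part of this
# module). Tests typically instantiate the runner as a context manager so the
# model is deleted and device memory is cleaned up afterwards:
#
#     def test_example(hf_runner, example_prompts):
#         with hf_runner("facebook/opt-125m", dtype="half") as hf_model:
#             hf_outputs = hf_model.generate_greedy(example_prompts,
#                                                   max_tokens=32)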


class VllmRunner:

    def __init__(
        self,
        model_name: str,
        task: TaskOption = "auto",
        tokenizer_name: Optional[str] = None,
        # Use smaller max model length, otherwise bigger model cannot run due
        # to kv cache size limit.
        max_model_len: int = 1024,
        dtype: str = "half",
        disable_log_stats: bool = True,
        tensor_parallel_size: int = 1,
        block_size: int = 16,
        enable_chunked_prefill: bool = False,
        swap_space: int = 4,
        enforce_eager: Optional[bool] = False,
        **kwargs,
    ) -> None:
        self.model = LLM(
            model=model_name,
            task=task,
            tokenizer=tokenizer_name,
            trust_remote_code=True,
            dtype=dtype,
            swap_space=swap_space,
            enforce_eager=enforce_eager,
            disable_log_stats=disable_log_stats,
            tensor_parallel_size=tensor_parallel_size,
            max_model_len=max_model_len,
            block_size=block_size,
            enable_chunked_prefill=enable_chunked_prefill,
            **kwargs,
        )

    def get_inputs(
        self,
        prompts: List[str],
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
    ) -> List[TextPrompt]:
        if images is not None:
            assert len(prompts) == len(images)

        if videos is not None:
            assert len(prompts) == len(videos)

        if audios is not None:
            assert len(prompts) == len(audios)

        inputs = [TextPrompt(prompt=prompt) for prompt in prompts]
        if images is not None:
            for i, image in enumerate(images):
                if image is not None:
                    inputs[i]["multi_modal_data"] = {"image": image}

        if videos is not None:
            for i, video in enumerate(videos):
                if video is not None:
                    inputs[i]["multi_modal_data"] = {"video": video}

        if audios is not None:
            for i, audio in enumerate(audios):
                if audio is not None:
                    inputs[i]["multi_modal_data"] = {"audio": audio}

        return inputs

    def classify(self, prompts: List[str]) -> List[List[float]]:
        req_outputs = self.model.encode(prompts)
        outputs = []
        for req_output in req_outputs:
            embedding = req_output.outputs.embedding
            outputs.append(embedding)
        return outputs

    def generate(
        self,
        prompts: List[str],
        sampling_params: SamplingParams,
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        inputs = self.get_inputs(prompts,
                                 images=images,
                                 videos=videos,
                                 audios=audios)

        req_outputs = self.model.generate(inputs,
                                          sampling_params=sampling_params)

        outputs: List[Tuple[List[List[int]], List[str]]] = []
        for req_output in req_outputs:
            prompt_str = req_output.prompt
            prompt_ids = req_output.prompt_token_ids
            req_sample_output_ids: List[List[int]] = []
            req_sample_output_strs: List[str] = []
            for sample in req_output.outputs:
                output_str = sample.text
                output_ids = list(sample.token_ids)
                req_sample_output_ids.append(prompt_ids + output_ids)
                req_sample_output_strs.append(prompt_str + output_str)
            outputs.append((req_sample_output_ids, req_sample_output_strs))
        return outputs

    @staticmethod
    def _final_steps_generate_w_logprobs(
        req_outputs: List[RequestOutput],
    ) -> List[TokensTextLogprobsPromptLogprobs]:
        outputs: List[TokensTextLogprobsPromptLogprobs] = []
        for req_output in req_outputs:
            assert len(req_output.outputs) > 0
            for sample in req_output.outputs:
                output_str = sample.text
                output_ids = list(sample.token_ids)
                output_logprobs = sample.logprobs
            outputs.append((output_ids, output_str, output_logprobs,
                            req_output.prompt_logprobs))
        return outputs

    def generate_w_logprobs(
        self,
        prompts: List[str],
        sampling_params: SamplingParams,
        images: Optional[PromptImageInput] = None,
        audios: Optional[PromptAudioInput] = None,
        videos: Optional[PromptVideoInput] = None,
    ) -> Union[List[TokensTextLogprobs],
               List[TokensTextLogprobsPromptLogprobs]]:
        inputs = self.get_inputs(prompts,
                                 images=images,
                                 videos=videos,
                                 audios=audios)

        req_outputs = self.model.generate(inputs,
                                          sampling_params=sampling_params)

        toks_str_logsprobs_prompt_logprobs = (
            self._final_steps_generate_w_logprobs(req_outputs))
        # Omit prompt logprobs if not required by sampling params
        return ([x[0:-1] for x in toks_str_logsprobs_prompt_logprobs]
                if sampling_params.prompt_logprobs is None else
                toks_str_logsprobs_prompt_logprobs)

    def generate_encoder_decoder_w_logprobs(
        self,
        encoder_decoder_prompts: List[ExplicitEncoderDecoderPrompt[str, str]],
        sampling_params: SamplingParams,
    ) -> Union[List[TokensTextLogprobs],
               List[TokensTextLogprobsPromptLogprobs]]:
        '''
        Logprobs generation for vLLM encoder/decoder models
        '''

        assert sampling_params.logprobs is not None
        req_outputs = self.model.generate(encoder_decoder_prompts,
                                          sampling_params=sampling_params)
        toks_str_logsprobs_prompt_logprobs = (
            self._final_steps_generate_w_logprobs(req_outputs))
        # Omit prompt logprobs if not required by sampling params
        return ([x[0:-1] for x in toks_str_logsprobs_prompt_logprobs]
                if sampling_params.prompt_logprobs is None else
                toks_str_logsprobs_prompt_logprobs)

    def generate_greedy(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
    ) -> List[Tuple[List[int], str]]:
        greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens)
        outputs = self.generate(prompts,
                                greedy_params,
                                images=images,
                                videos=videos,
                                audios=audios)
        return [(output_ids[0], output_str[0])
                for output_ids, output_str in outputs]

    def generate_greedy_logprobs(
        self,
        prompts: List[str],
        max_tokens: int,
        num_logprobs: int,
        num_prompt_logprobs: Optional[int] = None,
        images: Optional[PromptImageInput] = None,
        audios: Optional[PromptAudioInput] = None,
        videos: Optional[PromptVideoInput] = None,
        stop_token_ids: Optional[List[int]] = None,
    ) -> Union[List[TokensTextLogprobs],
               List[TokensTextLogprobsPromptLogprobs]]:
        greedy_logprobs_params = SamplingParams(
            temperature=0.0,
            max_tokens=max_tokens,
            logprobs=num_logprobs,
            prompt_logprobs=num_prompt_logprobs,
            stop_token_ids=stop_token_ids)

        return self.generate_w_logprobs(prompts,
                                        greedy_logprobs_params,
                                        images=images,
                                        audios=audios,
                                        videos=videos)

    def generate_encoder_decoder_greedy_logprobs(
        self,
        encoder_decoder_prompts: List[ExplicitEncoderDecoderPrompt[str, str]],
        max_tokens: int,
        num_logprobs: int,
        num_prompt_logprobs: Optional[int] = None,
    ) -> Union[List[TokensTextLogprobs],
               List[TokensTextLogprobsPromptLogprobs]]:
        '''
        Greedy logprobs generation for vLLM encoder/decoder models
        '''
        greedy_logprobs_params = SamplingParams(
            temperature=0.0,
            max_tokens=max_tokens,
            logprobs=num_logprobs,
            prompt_logprobs=num_prompt_logprobs,
        )

        return self.generate_encoder_decoder_w_logprobs(
            encoder_decoder_prompts, greedy_logprobs_params)

    def generate_beam_search(
        self,
        prompts: Union[List[str], List[List[int]]],
        beam_width: int,
        max_tokens: int,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        outputs = self.model.beam_search(
            prompts,
            BeamSearchParams(beam_width=beam_width, max_tokens=max_tokens))
        returned_outputs = []
        for output in outputs:
            token_ids = [x.tokens for x in output.sequences]
            texts = [x.text for x in output.sequences]
            returned_outputs.append((token_ids, texts))
        return returned_outputs

    def encode(
        self,
        prompts: List[str],
        images: Optional[PromptImageInput] = None,
        videos: Optional[PromptVideoInput] = None,
        audios: Optional[PromptAudioInput] = None,
    ) -> List[List[float]]:
        inputs = self.get_inputs(prompts,
                                 images=images,
                                 videos=videos,
                                 audios=audios)

        req_outputs = self.model.encode(inputs)
        return [req_output.outputs.embedding for req_output in req_outputs]

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
        cleanup_dist_env_and_memory()


@pytest.fixture(scope="session")
def vllm_runner():
    return VllmRunner
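
# A minimal usage sketch for the ``vllm_runner`` fixture above (illustrative
# only; the model name and token budget are assumptions). A typical
# correctness test runs the same prompts through both runners and compares
# the greedy completions:
#
#     def test_models(hf_runner, vllm_runner, example_prompts):
#         with hf_runner("facebook/opt-125m", dtype="half") as hf_model:
#             hf_outputs = hf_model.generate_greedy(example_prompts, 32)
#         with vllm_runner("facebook/opt-125m", dtype="half") as vllm_model:
#             vllm_outputs = vllm_model.generate_greedy(example_prompts, 32)
#         for hf_output, vllm_output in zip(hf_outputs, vllm_outputs):
#             assert hf_output[1] == vllm_output[1]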


def get_tokenizer_pool_config(tokenizer_group_type):
    if tokenizer_group_type is None:
        return None
    if tokenizer_group_type == "ray":
        return TokenizerPoolConfig(pool_size=1,
                                   pool_type="ray",
                                   extra_config={})
    if isinstance(tokenizer_group_type, type):
        return TokenizerPoolConfig(pool_size=1,
                                   pool_type=tokenizer_group_type,
                                   extra_config={})
    raise ValueError(f"Unknown tokenizer_group_type: {tokenizer_group_type}")


@pytest.fixture()
def temporary_enable_log_propagate():
    import logging
    logger = logging.getLogger("vllm")
    logger.propagate = True
    yield
    logger.propagate = False


@pytest.fixture()
def caplog_vllm(temporary_enable_log_propagate, caplog):
    # To capture vllm log, we should enable propagate=True temporarily
    # because caplog depends on logs propagated to the root logger.
    yield caplog


@pytest.fixture(scope="session")
def num_gpus_available():
    """Get number of GPUs without initializing the CUDA context
    in current process."""

    return cuda_device_count_stateless()


temp_dir = tempfile.gettempdir()
_dummy_opt_path = os.path.join(temp_dir, "dummy_opt")
_dummy_llava_path = os.path.join(temp_dir, "dummy_llava")
_dummy_gemma2_embedding_path = os.path.join(temp_dir, "dummy_gemma2_embedding")


@pytest.fixture
def dummy_opt_path():
    json_path = os.path.join(_dummy_opt_path, "config.json")
    if not os.path.exists(_dummy_opt_path):
        snapshot_download(repo_id="facebook/opt-125m",
                          local_dir=_dummy_opt_path,
                          ignore_patterns=[
                              "*.bin", "*.bin.index.json", "*.pt", "*.h5",
                              "*.msgpack"
                          ])
        assert os.path.exists(json_path)
        with open(json_path, "r") as f:
            config = json.load(f)
        config["architectures"] = ["MyOPTForCausalLM"]
        with open(json_path, "w") as f:
            json.dump(config, f)
    return _dummy_opt_path


@pytest.fixture
def dummy_llava_path():
    json_path = os.path.join(_dummy_llava_path, "config.json")
    if not os.path.exists(_dummy_llava_path):
        snapshot_download(repo_id="llava-hf/llava-1.5-7b-hf",
                          local_dir=_dummy_llava_path,
                          ignore_patterns=[
                              "*.bin", "*.bin.index.json", "*.pt", "*.h5",
                              "*.msgpack"
                          ])
        assert os.path.exists(json_path)
        with open(json_path, "r") as f:
            config = json.load(f)
        config["architectures"] = ["MyLlava"]
        with open(json_path, "w") as f:
            json.dump(config, f)
    return _dummy_llava_path


@pytest.fixture
def dummy_gemma2_embedding_path():
    json_path = os.path.join(_dummy_gemma2_embedding_path, "config.json")
    if not os.path.exists(_dummy_gemma2_embedding_path):
        snapshot_download(repo_id="BAAI/bge-multilingual-gemma2",
                          local_dir=_dummy_gemma2_embedding_path,
                          ignore_patterns=[
                              "*.bin", "*.bin.index.json", "*.pt", "*.h5",
                              "*.msgpack"
                          ])
        assert os.path.exists(json_path)
        with open(json_path, "r") as f:
            config = json.load(f)
        config["architectures"] = ["MyGemma2Embedding"]
        with open(json_path, "w") as f:
            json.dump(config, f)
    return _dummy_gemma2_embedding_path
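
# A hedged sketch of how the dummy-model fixtures above are meant to be used
# (the test body below is an assumption for illustration, not part of this
# module): the downloaded config is rewritten to point at an out-of-tree
# architecture name such as "MyOPTForCausalLM", so a test can register its own
# implementation under that name and then load the local directory as a
# regular model, e.g.
#
#     def test_oot_registration(dummy_opt_path):
#         from vllm import ModelRegistry
#         ModelRegistry.register_model("MyOPTForCausalLM", MyOPTForCausalLM)
#         llm = LLM(model=dummy_opt_path, load_format="dummy")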