import contextlib
import gc
import os
import sys
from collections import UserList
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar, Union

import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from transformers import (AutoModelForCausalLM, AutoModelForSeq2SeqLM,
                          AutoModelForVision2Seq, AutoTokenizer, BatchEncoding,
                          BatchFeature)

from tests.models.utils import DecoderPromptType
from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset
from vllm.config import TokenizerPoolConfig
from vllm.connections import global_http_connection
from vllm.distributed import (destroy_distributed_environment,
                              destroy_model_parallel)
from vllm.inputs import TextPrompt
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sequence import SampleLogprobs
from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, cuda_device_count_stateless,
                        is_cpu, to_enc_dec_tuple_list,
                        zip_enc_dec_prompt_lists)

logger = init_logger(__name__)

_TEST_DIR = os.path.dirname(__file__)
_TEST_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "example.txt")]
_LONG_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "summary.txt")]


def _read_prompts(filename: str) -> List[str]:
    with open(filename, "r") as f:
        prompts = f.readlines()
        return prompts


class _ImageAssetPrompts(TypedDict):
    stop_sign: str
    cherry_blossom: str


if sys.version_info < (3, 9):
    # UserList cannot be subscripted
    class _ImageAssetsBase(UserList):
        pass
else:

    class _ImageAssetsBase(UserList[ImageAsset]):
        pass


class _ImageAssets(_ImageAssetsBase):

    def __init__(self) -> None:
        super().__init__([
            ImageAsset("stop_sign"),
            ImageAsset("cherry_blossom"),
        ])

    def prompts(self, prompts: _ImageAssetPrompts) -> List[str]:
        """
        Convenience method to define the prompt for each test image.

        The order of the returned prompts matches the order of the
        assets when iterating through this object.
        """
        return [prompts["stop_sign"], prompts["cherry_blossom"]]


IMAGE_ASSETS = _ImageAssets()
"""Singleton instance of :class:`_ImageAssets`."""


@pytest.fixture(autouse=True)
def init_test_http_connection():
    # pytest_asyncio may use a different event loop per test
    # so we need to make sure the async client is created anew
    global_http_connection.reuse_client = False


def cleanup():
    destroy_model_parallel()
    destroy_distributed_environment()
    with contextlib.suppress(AssertionError):
        torch.distributed.destroy_process_group()
    gc.collect()
    if not is_cpu():
        torch.cuda.empty_cache()


@pytest.fixture()
def should_do_global_cleanup_after_test(request) -> bool:
    """Allow subdirectories to skip global cleanup by overriding this fixture.
    This can provide a ~10x speedup for non-GPU unit tests since they don't need
    to initialize torch.
    """

    if request.node.get_closest_marker("skip_global_cleanup"):
        return False

    return True


@pytest.fixture(autouse=True)
def cleanup_fixture(should_do_global_cleanup_after_test: bool):
    yield
    if should_do_global_cleanup_after_test:
        cleanup()
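
# Tests that do not touch the GPU can opt out of the global cleanup above via
# the marker checked in should_do_global_cleanup_after_test, e.g. (the test
# below is an assumed example, not part of this file):
#
#   @pytest.mark.skip_global_cleanup
#   def test_pure_python_helper():
#       ...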


@pytest.fixture
def example_prompts() -> List[str]:
    prompts = []
    for filename in _TEST_PROMPTS:
        prompts += _read_prompts(filename)
    return prompts


@pytest.fixture
def example_encoder_decoder_prompts() \
        -> Dict[DecoderPromptType,
                Tuple[List[str], List[Optional[str]]]]:
    '''
    Returns encoder and decoder prompts zipped into pairs, wherein each pair
    of same-index entries corresponds to an (encoder prompt, decoder prompt)
    tuple.

    Returns:

    * A dictionary mapping each :class:`DecoderPromptType` to its zipped
      prompt list:
      * NONE: every decoder prompt is ``None``
      * EMPTY_STR: every decoder prompt is an empty string
      * CUSTOM: the decoder prompts are the encoder prompts in reverse order
    '''

    encoder_prompts = []
    for filename in _TEST_PROMPTS:
        encoder_prompts += _read_prompts(filename)

    custom_decoder_prompts = encoder_prompts[::-1]
    empty_str_decoder_prompts = [""] * len(encoder_prompts)
    none_decoder_prompts = [None] * len(encoder_prompts)

    return {
        DecoderPromptType.NONE:
        zip_enc_dec_prompt_lists(encoder_prompts, none_decoder_prompts),
        DecoderPromptType.EMPTY_STR:
        zip_enc_dec_prompt_lists(encoder_prompts, empty_str_decoder_prompts),
        DecoderPromptType.CUSTOM:
        zip_enc_dec_prompt_lists(encoder_prompts, custom_decoder_prompts),
    }


@pytest.fixture
def example_long_prompts() -> List[str]:
    prompts = []
    for filename in _LONG_PROMPTS:
        prompts += _read_prompts(filename)
    return prompts


@pytest.fixture(scope="session")
def image_assets() -> _ImageAssets:
    return IMAGE_ASSETS


_T = TypeVar("_T", nn.Module, torch.Tensor, BatchEncoding, BatchFeature)


class HfRunner:

    def wrap_device(self, input: _T) -> _T:
        if not is_cpu():
            return input.to("cuda")
        else:
            return input.to("cpu")

    def __init__(
        self,
        model_name: str,
        dtype: str = "half",
        *,
        model_kwargs: Optional[Dict[str, Any]] = None,
        is_embedding_model: bool = False,
        is_vision_model: bool = False,
        is_encoder_decoder_model: bool = False,
    ) -> None:
        torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[dtype]

        self.model_name = model_name

        if is_embedding_model:
            # Lazy init required for AMD CI
            from sentence_transformers import SentenceTransformer
            self.model = self.wrap_device(
                SentenceTransformer(
                    model_name,
                    device="cpu",
                ).to(dtype=torch_dtype))
        else:
            if is_vision_model:
                auto_cls = AutoModelForVision2Seq
            elif is_encoder_decoder_model:
                auto_cls = AutoModelForSeq2SeqLM
            else:
                auto_cls = AutoModelForCausalLM

            model_kwargs = model_kwargs if model_kwargs is not None else {}
            self.model = self.wrap_device(
                auto_cls.from_pretrained(
                    model_name,
                    torch_dtype=torch_dtype,
                    trust_remote_code=True,
                    **model_kwargs,
                ))

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            torch_dtype=torch_dtype,
            trust_remote_code=True,
        )

        try:
            # don't put this import at the top level
            # it will call torch.cuda.device_count()
            from transformers import AutoProcessor  # noqa: F401
            self.processor = AutoProcessor.from_pretrained(
                model_name,
                torch_dtype=torch_dtype,
                trust_remote_code=True,
            )
        except Exception:
            logger.warning(
                "Unable to auto-load processor from HuggingFace for "
                "model %s. Using tokenizer instead.", model_name)
            self.processor = self.tokenizer

    def generate(
        self,
        prompts: List[str],
        images: Optional[List[Image.Image]] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        if images:
            assert len(prompts) == len(images)

        outputs: List[Tuple[List[List[int]], List[str]]] = []
        for i, prompt in enumerate(prompts):
            processor_kwargs: Dict[str, Any] = {
                "text": prompt,
                "return_tensors": "pt",
            }
            if images is not None and images[i] is not None:
                processor_kwargs["images"] = images[i]

            inputs = self.processor(**processor_kwargs)

            output_ids = self.model.generate(
                **self.wrap_device(inputs),
                use_cache=True,
                **kwargs,
            )
            output_str = self.processor.batch_decode(
                output_ids,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=False,
            )
            output_ids = output_ids.cpu().tolist()
            outputs.append((output_ids, output_str))
        return outputs

    def generate_greedy(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[List[Image.Image]] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[int], str]]:
        outputs = self.generate(prompts,
                                do_sample=False,
                                max_new_tokens=max_tokens,
                                images=images,
                                **kwargs)

        return [(output_ids[0], output_str[0])
                for output_ids, output_str in outputs]

    def generate_beam_search(
        self,
        prompts: List[str],
        beam_width: int,
        max_tokens: int,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        outputs = self.generate(prompts,
                                do_sample=False,
                                max_new_tokens=max_tokens,
                                num_beams=beam_width,
                                num_return_sequences=beam_width)
        for i in range(len(outputs)):
            output_ids, output_str = outputs[i]
            for j in range(len(output_ids)):
                output_ids[j] = [
                    x for x in output_ids[j]
                    if x != self.tokenizer.pad_token_id
                ]
            outputs[i] = (output_ids, output_str)
        return outputs

    def generate_greedy_logprobs(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[List[Image.Image]] = None,
        **kwargs: Any,
    ) -> List[List[torch.Tensor]]:
        all_logprobs: List[List[torch.Tensor]] = []
        for i, prompt in enumerate(prompts):
            processor_kwargs: Dict[str, Any] = {
                "text": prompt,
                "return_tensors": "pt",
            }
            if images is not None and images[i] is not None:
                processor_kwargs["images"] = images[i]

            inputs = self.processor(**processor_kwargs)

            output = self.model.generate(
                **self.wrap_device(inputs),
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )
            seq_logprobs: List[torch.Tensor] = []
            for hidden_states in output.hidden_states:
                last_hidden_states = hidden_states[-1][0]
                logits = torch.matmul(
                    last_hidden_states,
                    self.model.get_output_embeddings().weight.t(),
                )
                if self.model.get_output_embeddings().bias is not None:
                    logits += self.model.get_output_embeddings(
                    ).bias.unsqueeze(0)
                logprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
                seq_logprobs.append(logprobs)
            all_logprobs.append(seq_logprobs)
        return all_logprobs
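
    # The helper below projects the per-step hidden states returned by
    # HF generate() through the output embeddings, applies log_softmax, and
    # keeps only the top-k entries, returning one {token_id: logprob} dict per
    # generated token plus the number of generated tokens.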
    def _hidden_states_to_logprobs(
        self,
        hidden_states,
        num_logprobs,
    ) -> Tuple[List[Dict[int, float]], int]:
        seq_logprobs: List[torch.Tensor] = []
        output_len = len(hidden_states)
        for _, hidden_state in enumerate(hidden_states):
            last_hidden_states = hidden_state[-1][0]
            logits = torch.matmul(
                last_hidden_states,
                self.model.get_output_embeddings().weight.t(),
            )
            if getattr(self.model.get_output_embeddings(), "bias",
                       None) is not None:
                logits += self.model.get_output_embeddings().bias.unsqueeze(0)
            logprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
            seq_logprobs.append(logprobs)

        # convert to dict
        seq_logprobs_lst: List[Dict[int, float]] = []
        for tok_idx, tok_logprobs in enumerate(seq_logprobs):
            # drop prompt logprobs
            if tok_idx == 0:
                tok_logprobs = tok_logprobs[-1, :].reshape(1, -1)
            topk = tok_logprobs.topk(num_logprobs)

            tok_logprobs_dct = {}
            for token_id, logprob in zip(topk.indices[0], topk.values[0]):
                tok_logprobs_dct[token_id.item()] = logprob.item()

            seq_logprobs_lst.append(tok_logprobs_dct)

        return (
            seq_logprobs_lst,
            output_len,
        )

    def generate_greedy_logprobs_limit(
        self,
        prompts: List[str],
        max_tokens: int,
        num_logprobs: int,
        images: Optional[List[Image.Image]] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[int], str, List[Dict[int, float]]]]:
        all_logprobs: List[List[Dict[int, float]]] = []
        all_output_ids: List[List[int]] = []
        all_output_strs: List[str] = []

        for i, prompt in enumerate(prompts):
            processor_kwargs: Dict[str, Any] = {
                "text": prompt,
                "return_tensors": "pt",
            }
            if images is not None and images[i] is not None:
                processor_kwargs["images"] = images[i]

            inputs = self.processor(**processor_kwargs)

            output = self.model.generate(
                **self.wrap_device(inputs),
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )

            (
                seq_logprobs_lst,
                output_len,
            ) = self._hidden_states_to_logprobs(output.hidden_states,
                                                num_logprobs)

            all_logprobs.append(seq_logprobs_lst)
            seq_ids = output.sequences[0]
            output_len = len(seq_logprobs_lst)
            output_ids = seq_ids[-output_len:]
            all_output_ids.append(output_ids.tolist())
            all_output_strs.append(self.tokenizer.decode(output_ids))

        outputs = zip(all_output_ids, all_output_strs, all_logprobs)
        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def generate_encoder_decoder_greedy_logprobs_limit(
        self,
        encoder_decoder_prompts: Tuple[List[str], List[str]],
        max_tokens: int,
        num_logprobs: int,
        **kwargs: Any,
    ) -> List[Tuple[List[int], str, List[Dict[int, float]]]]:
        '''
        Greedy logprobs generation for HuggingFace encoder/decoder models
        '''

        all_logprobs: List[List[Dict[int, float]]] = []
        all_output_ids: List[List[int]] = []
        all_output_strs: List[str] = []

        for (encoder_prompt,
             decoder_prompt) in to_enc_dec_tuple_list(encoder_decoder_prompts):
            encoder_input_ids = self.wrap_device(
                self.tokenizer(encoder_prompt, return_tensors="pt").input_ids)
            decoder_input_ids = (
                None if decoder_prompt is None else self.wrap_device(
                    self.tokenizer(decoder_prompt,
                                   return_tensors="pt").input_ids))

            output = self.model.generate(
                encoder_input_ids,
                decoder_input_ids=decoder_input_ids,
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )

            (
                seq_logprobs_lst,
                output_len,
            ) = self._hidden_states_to_logprobs(output.decoder_hidden_states,
                                                num_logprobs)

            all_logprobs.append(seq_logprobs_lst)
            seq_ids = output.sequences[0]
            output_ids = seq_ids[-output_len:]
            all_output_ids.append(output_ids.tolist())
            all_output_strs.append(self.tokenizer.decode(output_ids))

        outputs = zip(all_output_ids, all_output_strs, all_logprobs)
        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def encode(self, prompts: List[str]) -> List[List[torch.Tensor]]:
        return self.model.encode(prompts)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
        cleanup()


@pytest.fixture(scope="session")
def hf_runner():
    return HfRunner
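
# Typical usage in a test (an illustrative sketch; the model name is an
# assumption):
#
#   def test_hf_baseline(hf_runner, example_prompts):
#       with hf_runner("facebook/opt-125m", dtype="half") as hf_model:
#           hf_outputs = hf_model.generate_greedy(example_prompts,
#                                                 max_tokens=32)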


class VllmRunner:

    def __init__(
        self,
        model_name: str,
        tokenizer_name: Optional[str] = None,
        # Use smaller max model length, otherwise bigger model cannot run due
        # to kv cache size limit.
        max_model_len: int = 1024,
        dtype: str = "half",
        disable_log_stats: bool = True,
        tensor_parallel_size: int = 1,
        block_size: int = 16,
        enable_chunked_prefill: bool = False,
        swap_space: int = 4,
        enforce_eager: Optional[bool] = False,
        **kwargs,
    ) -> None:
        self.model = LLM(
            model=model_name,
            tokenizer=tokenizer_name,
            trust_remote_code=True,
            dtype=dtype,
            swap_space=swap_space,
            enforce_eager=enforce_eager,
            disable_log_stats=disable_log_stats,
            tensor_parallel_size=tensor_parallel_size,
            max_model_len=max_model_len,
            block_size=block_size,
            enable_chunked_prefill=enable_chunked_prefill,
            **kwargs,
        )

    def generate(
        self,
        prompts: List[str],
        sampling_params: SamplingParams,
        images: Optional[List[Image.Image]] = None,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        if images is not None:
            assert len(prompts) == len(images)

        inputs = [TextPrompt(prompt=prompt) for prompt in prompts]
        if images is not None:
            for i, image in enumerate(images):
                inputs[i]["multi_modal_data"] = {"image": image}

        req_outputs = self.model.generate(inputs,
                                          sampling_params=sampling_params)

        outputs: List[Tuple[List[List[int]], List[str]]] = []
        for req_output in req_outputs:
            prompt_str = req_output.prompt
            prompt_ids = req_output.prompt_token_ids
            req_sample_output_ids: List[List[int]] = []
            req_sample_output_strs: List[str] = []
            for sample in req_output.outputs:
                output_str = sample.text
                output_ids = list(sample.token_ids)
                req_sample_output_ids.append(prompt_ids + output_ids)
                req_sample_output_strs.append(prompt_str + output_str)
            outputs.append((req_sample_output_ids, req_sample_output_strs))
        return outputs
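
    # The helper below flattens vLLM request outputs into
    # (token_ids, text, logprobs) tuples, which is the shape the
    # *_w_logprobs generation methods return.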
    def _final_steps_generate_w_logprobs(
        self,
        req_outputs: List[RequestOutput],
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        outputs: List[Tuple[List[int], str, Optional[SampleLogprobs]]] = []
        for req_output in req_outputs:
            for sample in req_output.outputs:
                output_str = sample.text
                output_ids = sample.token_ids
                output_logprobs = sample.logprobs
                outputs.append((output_ids, output_str, output_logprobs))
        return outputs

    def generate_w_logprobs(
        self,
        prompts: List[str],
        sampling_params: SamplingParams,
        images: Optional[List[Image.Image]] = None,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        assert sampling_params.logprobs is not None

        if images is not None:
            assert len(prompts) == len(images)

        inputs = [TextPrompt(prompt=prompt) for prompt in prompts]
        if images is not None:
            for i, image in enumerate(images):
                inputs[i]["multi_modal_data"] = {"image": image}

        req_outputs = self.model.generate(inputs,
                                          sampling_params=sampling_params)
        return self._final_steps_generate_w_logprobs(req_outputs)

    def generate_encoder_decoder_w_logprobs(
        self,
        encoder_decoder_prompts: Tuple[List[str], List[str]],
        sampling_params: SamplingParams,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        '''
        Logprobs generation for vLLM encoder/decoder models
        '''

        assert sampling_params.logprobs is not None
        req_outputs = self.model.generate(encoder_decoder_prompts,
                                          sampling_params=sampling_params)
        return self._final_steps_generate_w_logprobs(req_outputs)

    def generate_greedy(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[List[Image.Image]] = None,
    ) -> List[Tuple[List[int], str]]:
        greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens)
        outputs = self.generate(prompts, greedy_params, images=images)
        return [(output_ids[0], output_str[0])
                for output_ids, output_str in outputs]

    def generate_greedy_logprobs(
        self,
        prompts: List[str],
        max_tokens: int,
        num_logprobs: int,
        images: Optional[Union[List[Image.Image],
                               List[List[Image.Image]]]] = None,
        stop_token_ids: Optional[List[int]] = None,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        greedy_logprobs_params = SamplingParams(temperature=0.0,
                                                max_tokens=max_tokens,
                                                logprobs=num_logprobs,
                                                stop_token_ids=stop_token_ids)
        outputs = self.generate_w_logprobs(prompts,
                                           greedy_logprobs_params,
                                           images=images)

        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def generate_encoder_decoder_greedy_logprobs(
        self,
        encoder_decoder_prompts: Tuple[List[str], List[str]],
        max_tokens: int,
        num_logprobs: int,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        '''
        Greedy logprobs generation for vLLM encoder/decoder models
        '''
        greedy_logprobs_params = SamplingParams(temperature=0.0,
                                                use_beam_search=False,
                                                max_tokens=max_tokens,
                                                logprobs=num_logprobs)

        outputs = self.generate_encoder_decoder_w_logprobs(
            encoder_decoder_prompts, greedy_logprobs_params)

        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def generate_beam_search(
        self,
        prompts: List[str],
        beam_width: int,
        max_tokens: int,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        beam_search_params = SamplingParams(n=beam_width,
                                            use_beam_search=True,
                                            temperature=0.0,
                                            max_tokens=max_tokens)
        outputs = self.generate(prompts, beam_search_params)
        return outputs

    def encode(self, prompts: List[str]) -> List[List[float]]:
        req_outputs = self.model.encode(prompts)
        outputs = []
        for req_output in req_outputs:
            embedding = req_output.outputs.embedding
            outputs.append(embedding)
        return outputs

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
        cleanup()


@pytest.fixture(scope="session")
def vllm_runner():
    return VllmRunner
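
# Typical usage in a test (an illustrative sketch; the model name is an
# assumption):
#
#   def test_vllm_outputs(vllm_runner, example_prompts):
#       with vllm_runner("facebook/opt-125m", dtype="half") as vllm_model:
#           vllm_outputs = vllm_model.generate_greedy(example_prompts,
#                                                     max_tokens=32)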
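

# Helper (not a fixture) for tests that need a TokenizerPoolConfig: accepts
# None, the string "ray", or a pool class type and builds the matching config.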
def get_tokenizer_pool_config(tokenizer_group_type):
    if tokenizer_group_type is None:
        return None
    if tokenizer_group_type == "ray":
        return TokenizerPoolConfig(pool_size=1,
                                   pool_type="ray",
                                   extra_config={})
    if isinstance(tokenizer_group_type, type):
        return TokenizerPoolConfig(pool_size=1,
                                   pool_type=tokenizer_group_type,
                                   extra_config={})
    raise ValueError(f"Unknown tokenizer_group_type: {tokenizer_group_type}")


@pytest.fixture()
def temporary_enable_log_propagate():
    import logging
    logger = logging.getLogger("vllm")
    logger.propagate = True
    yield
    logger.propagate = False


@pytest.fixture()
def caplog_vllm(temporary_enable_log_propagate, caplog):
    # To capture vllm log, we should enable propagate=True temporarily
    # because caplog depends on logs propagated to the root logger.
    yield caplog


@pytest.fixture(scope="session")
def num_gpus_available():
    """Get number of GPUs without initializing the CUDA context
    in current process."""

    return cuda_device_count_stateless()