# SPDX-License-Identifier: Apache-2.0
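"""End-to-end tests for vLLM's OpenAI-compatible /v1/embeddings endpoint.

Covers single and batched inputs (text and token IDs), chat-style requests,
base64 encoding of responses, and prompt truncation.
"""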

import base64

import numpy as np
import openai
import pytest
import pytest_asyncio
import requests

from vllm.entrypoints.openai.protocol import EmbeddingResponse
from vllm.transformers_utils.tokenizer import get_tokenizer

from ...models.embedding.utils import check_embeddings_close
from ...utils import RemoteOpenAIServer

MODEL_NAME = "intfloat/multilingual-e5-small"
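# multilingual-e5-small produces 384-dimensional embeddings; the dimension
# asserts in the tests below rely on this.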

# A deliberately simple chat template for the chat-style requests below: it
# renders each message as "<role>: <content>\n".
DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}"""  # noqa: E501
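

# Start a single vLLM server with the OpenAI-compatible API for the whole
# module; engine startup is slow, so the server is shared across tests.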
@pytest.fixture(scope="module")
def server():
    args = [
        "--task",
        "embed",
        # use half precision for speed and memory savings in CI environment
        "--dtype",
        "bfloat16",
        "--enforce-eager",
        "--max-model-len",
        "512",
        "--chat-template",
        DUMMY_CHAT_TEMPLATE,
    ]

    with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
        yield remote_server
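

# A fresh AsyncOpenAI client per test, pointed at the shared server.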
@pytest_asyncio.fixture
async def client(server):
    async with server.get_async_client() as async_client:
        yield async_client
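

# A single text input should yield one 384-dim embedding and prompt-only
# token usage; the same holds when passing raw token IDs instead of text.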
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_embedding(client: openai.AsyncOpenAI, model_name: str):
    input_texts = [
        "The chef prepared a delicious meal.",
    ]

    # test single embedding
    embedding_response = await client.embeddings.create(
        model=model_name,
        input=input_texts,
        encoding_format="float",
    )
    embeddings = EmbeddingResponse.model_validate(
        embedding_response.model_dump(mode="json"))

    assert embeddings.id is not None
    assert len(embeddings.data) == 1
    assert len(embeddings.data[0].embedding) == 384
    assert embeddings.usage.completion_tokens == 0
    assert embeddings.usage.prompt_tokens == 11
    assert embeddings.usage.total_tokens == 11

    # test using token IDs
    input_tokens = [1, 1, 1, 1, 1]
    embedding_response = await client.embeddings.create(
        model=model_name,
        input=input_tokens,
        encoding_format="float",
    )
    embeddings = EmbeddingResponse.model_validate(
        embedding_response.model_dump(mode="json"))

    assert embeddings.id is not None
    assert len(embeddings.data) == 1
    assert len(embeddings.data[0].embedding) == 384
    assert embeddings.usage.completion_tokens == 0
    assert embeddings.usage.prompt_tokens == 5
    assert embeddings.usage.total_tokens == 5
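

# Batched inputs: a list of strings or a list of token-ID lists should
# produce one embedding per item, with usage summed over the batch.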
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_batch_embedding(client: openai.AsyncOpenAI, model_name: str):
    # test list[str]
    input_texts = [
        "The cat sat on the mat.", "A feline was resting on a rug.",
        "Stars twinkle brightly in the night sky."
    ]
    embedding_response = await client.embeddings.create(
        model=model_name,
        input=input_texts,
        encoding_format="float",
    )
    embeddings = EmbeddingResponse.model_validate(
        embedding_response.model_dump(mode="json"))

    assert embeddings.id is not None
    assert len(embeddings.data) == 3
    assert len(embeddings.data[0].embedding) == 384
    assert embeddings.usage.completion_tokens == 0
    assert embeddings.usage.prompt_tokens == 33
    assert embeddings.usage.total_tokens == 33

    # test list[list[int]]
    input_tokens = [[4, 5, 7, 9, 20], [15, 29, 499], [24, 24, 24, 24, 24],
                    [25, 32, 64, 77]]
    embedding_response = await client.embeddings.create(
        model=model_name,
        input=input_tokens,
        encoding_format="float",
    )
    embeddings = EmbeddingResponse.model_validate(
        embedding_response.model_dump(mode="json"))

    assert embeddings.id is not None
    assert len(embeddings.data) == 4
    assert len(embeddings.data[0].embedding) == 384
    assert embeddings.usage.completion_tokens == 0
    assert embeddings.usage.prompt_tokens == 17
    assert embeddings.usage.total_tokens == 17
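

# Chat-style requests ("messages" on /v1/embeddings, a vLLM extension) must
# match embedding the equivalent templated prompt sent as plain input.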
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_conversation_embedding(server: RemoteOpenAIServer,
                                      client: openai.AsyncOpenAI,
                                      model_name: str):
    messages = [{
        "role": "user",
        "content": "The cat sat on the mat.",
    }, {
        "role": "assistant",
        "content": "A feline was resting on a rug.",
    }, {
        "role": "user",
        "content": "Stars twinkle brightly in the night sky.",
    }]

    # The OpenAI SDK does not support "messages" for embeddings, so send the
    # chat-style variant as a raw HTTP request.
    chat_response = requests.post(
        server.url_for("v1/embeddings"),
        json={
            "model": model_name,
            "messages": messages,
            "encoding_format": "float",
        },
    )
    chat_response.raise_for_status()
    chat_embeddings = EmbeddingResponse.model_validate(chat_response.json())

    # Apply the same chat template locally and embed the rendered prompt.
    tokenizer = get_tokenizer(tokenizer_name=model_name, tokenizer_mode="fast")
    prompt = tokenizer.apply_chat_template(
        messages,
        chat_template=DUMMY_CHAT_TEMPLATE,
        add_generation_prompt=True,
        continue_final_message=False,
        tokenize=False,
    )
    completion_response = await client.embeddings.create(
        model=model_name,
        input=prompt,
        encoding_format="float",
        # To be consistent with chat
        extra_body={"add_special_tokens": False},
    )
    completion_embeddings = EmbeddingResponse.model_validate(
        completion_response.model_dump(mode="json"))

    assert chat_embeddings.id is not None
    assert completion_embeddings.id is not None
    assert chat_embeddings.created <= completion_embeddings.created
    assert chat_embeddings.model_dump(
        exclude={"id", "created"}) == (completion_embeddings.model_dump(
            exclude={"id", "created"}))
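

# "base64" responses carry each embedding as base64-encoded raw float32
# bytes; decoding them must reproduce the "float" values.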
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_batch_base64_embedding(client: openai.AsyncOpenAI,
                                      model_name: str):
    input_texts = [
        "Hello my name is",
        "The best thing about vLLM is that it supports many different models"
    ]

    responses_float = await client.embeddings.create(input=input_texts,
                                                     model=model_name,
                                                     encoding_format="float")
    float_data = [d.embedding for d in responses_float.data]

    responses_base64 = await client.embeddings.create(input=input_texts,
                                                      model=model_name,
                                                      encoding_format="base64")
    base64_data = []
    for data in responses_base64.data:
        base64_data.append(
            np.frombuffer(base64.b64decode(data.embedding),
                          dtype="float32").tolist())

    check_embeddings_close(
        embeddings_0_lst=float_data,
        embeddings_1_lst=base64_data,
        name_0="float",
        name_1="base64",
    )

    # With no encoding_format, the OpenAI client requests base64 and decodes
    # it to float32 itself, so the result should match the "float" responses.
    responses_default = await client.embeddings.create(input=input_texts,
                                                       model=model_name)
    default_data = [d.embedding for d in responses_default.data]

    check_embeddings_close(
        embeddings_0_lst=float_data,
        embeddings_1_lst=default_data,
        name_0="float",
        name_1="default",
    )
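

# truncate_prompt_tokens caps the prompt at the given length, so usage
# should report exactly that many tokens for text and token-ID inputs.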
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_embedding_truncation(client: openai.AsyncOpenAI,
                                           model_name: str):
    input_texts = [
        "Como o Brasil pode fomentar o desenvolvimento de modelos de IA?",
    ]

    # test truncation with a text input
    embedding_response = await client.embeddings.create(
        model=model_name,
        input=input_texts,
        extra_body={"truncate_prompt_tokens": 10})
    embeddings = EmbeddingResponse.model_validate(
        embedding_response.model_dump(mode="json"))

    assert embeddings.id is not None
    assert len(embeddings.data) == 1
    assert len(embeddings.data[0].embedding) == 384
    assert embeddings.usage.completion_tokens == 0
    assert embeddings.usage.prompt_tokens == 10
    assert embeddings.usage.total_tokens == 10

    # test truncation with token IDs
    input_tokens = [
        1, 24428, 289, 18341, 26165, 285, 19323, 283, 289, 26789, 3871, 28728,
        9901, 340, 2229, 385, 340, 315, 28741, 28804, 2
    ]
    embedding_response = await client.embeddings.create(
        model=model_name,
        input=input_tokens,
        extra_body={"truncate_prompt_tokens": 10})
    embeddings = EmbeddingResponse.model_validate(
        embedding_response.model_dump(mode="json"))

    assert embeddings.id is not None
    assert len(embeddings.data) == 1
    assert len(embeddings.data[0].embedding) == 384
    assert embeddings.usage.completion_tokens == 0
    assert embeddings.usage.prompt_tokens == 10
    assert embeddings.usage.total_tokens == 10
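

# Truncation sizes beyond max_model_len must be rejected as a bad request.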
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_embedding_truncation_invalid(client: openai.AsyncOpenAI,
                                                   model_name: str):
    input_texts = [
        "Como o Brasil pode fomentar o desenvolvimento de modelos de IA?",
    ]

    # The request is rejected before any embedding is computed, so check the
    # error message on the captured exception.
    with pytest.raises(openai.BadRequestError) as exc_info:
        await client.embeddings.create(
            model=model_name,
            input=input_texts,
            extra_body={"truncate_prompt_tokens": 8193})

    assert "truncate_prompt_tokens value is greater than max_model_len. " \
        "Please, select a smaller truncation size." in str(exc_info.value)