# SPDX-License-Identifier: Apache-2.0
"""Compare the outputs of HF and vLLM for Mistral models using greedy sampling.

Run `pytest tests/models/test_mistral.py`.
"""
import copy
import json

import jsonschema
import jsonschema.exceptions
import pytest

from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import (  # noqa
    MistralToolParser)
from vllm.sampling_params import GuidedDecodingParams, SamplingParams

from ...utils import check_logprobs_close

MODELS = [
    "mistralai/Mistral-7B-Instruct-v0.3",
]

MISTRAL_FORMAT_MODELS = [
    "mistralai/Mistral-7B-Instruct-v0.3",
    # uses the v3-Tekken tokenizer
    "mistralai/Ministral-8B-Instruct-2410",
    # Mistral-Nemo is too big for CI, but passes locally
    # "mistralai/Mistral-Nemo-Instruct-2407"
]

SAMPLING_PARAMS = SamplingParams(max_tokens=512, temperature=0.0, logprobs=5)
SYMBOLIC_LANG_PROMPTS = [
    "勇敢な船乗りについての詩を書く",  # japanese
    "寫一首關於勇敢的水手的詩",  # chinese
    "ပုံပြင်လေးပြောပြပါ်:\n",  # burmese
    "Repeat the phrase 'URGENCY🌶️':\nURGENCY🌶️\nURGENCY🌶️\n",  # see https://github.com/vllm-project/vllm/pull/9625
]

# for function calling
TOOLS = [{
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {
                    "type":
                    "string",
                    "description":
                    "The city to find the weather for, e.g. 'San Francisco'"
                },
                "state": {
                    "type":
                    "string",
                    "description":
                    "the two-letter abbreviation for the state that the city is"
                    " in, e.g. 'CA' which would mean 'California'"
                },
                "unit": {
                    "type": "string",
                    "description": "The unit to fetch the temperature in",
                    "enum": ["celsius", "fahrenheit"]
                }
            },
            "required": ["city", "state", "unit"]
        }
    },
}, {
    "type": "function",
    "function": {
        "name": "rewrite",
        "description": "Rewrites text",
        "parameters": {
            "type": "object",
            "required": [],
            "properties": {
                "text": {
                    "type": "string",
                    "description": "The input text to rewrite."
                }
            }
        }
    }
}]
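
# Multi-turn conversation (with a completed `rewrite` tool call) that is
# replayed by test_mistral_function_calling.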
MSGS = [
    {
        "role": "system",
        "content": "You are an assistant."
    },
    {
        "role":
        "user",
        "content":
        "Could you please rewrite the below article? \n\n My English needs improvving, maybe I make errors."  # noqa
    },
    {
        "role":
        "assistant",
        "content":
        "",
        "tool_calls": [{
            "id": "bbc5b7ede",
            "type": "function",
            "function": {
                "name":
                "rewrite",
                "arguments":
                '{\"text\":\"My English needs improvving, maybe I make errors.\"}'  # noqa
            }
        }]
    },
    {
        "role": "tool",
        "content":
        "{\"action\":\"rewrite\",\"outcome\":\"My English needs improving, maybe I make errors.\"}",  # noqa
        "tool_call_id": "bbc5b7ede",
        "name": "rewrite"
    },
    {
        "role": "assistant",
        "content": "---\n\nMy English needs improving, maybe I make errors"
    },
    {
        "role":
        "user",
        "content": ("Can you tell me what the temperature"
                    " will be in Dallas, in fahrenheit?")
    }
]
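
# JSON schema used by test_mistral_guided_decoding to constrain generation
# and to validate the generated output.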
SAMPLE_JSON_SCHEMA = {
    "type": "object",
    "properties": {
        "name": {
            "type": "string"
        },
        "age": {
            "type": "integer"
        },
        "skills": {
            "type": "array",
            "items": {
                "type": "string",
                "maxLength": 10
            },
            "minItems": 3
        },
        "work_history": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "company": {
                        "type": "string"
                    },
                    "duration": {
                        "type": "number"
                    },
                    "position": {
                        "type": "string"
                    }
                },
                "required": ["company", "position"]
            }
        }
    },
    "required": ["name", "age", "skills", "work_history"]
}


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [64])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models(
    hf_runner,
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
) -> None:
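    """Compare HF and vLLM greedy generations on the same prompts and check
    that their top-logprob outputs stay close."""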
    # TODO(sang): Sliding window should be tested separately.
    with hf_runner(model, dtype=dtype) as hf_model:
        hf_outputs = hf_model.generate_greedy_logprobs_limit(
            example_prompts, max_tokens, num_logprobs)

    with vllm_runner(model, dtype=dtype,
                     tokenizer_mode="mistral") as vllm_model:
        vllm_outputs = vllm_model.generate_greedy_logprobs(
            example_prompts, max_tokens, num_logprobs)

    check_logprobs_close(
        outputs_0_lst=hf_outputs,
        outputs_1_lst=vllm_outputs,
        name_0="hf",
        name_1="vllm",
    )


@pytest.mark.skip("RE-ENABLE: test is currently failing on main.")
@pytest.mark.parametrize("model", MISTRAL_FORMAT_MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [64])
@pytest.mark.parametrize("num_logprobs", [5])
def test_mistral_format(
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
) -> None:
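    """Load the same checkpoint through the Mistral-native and HF loaders and
    check that their greedy logprob outputs stay close."""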
    with vllm_runner(
            model,
            dtype=dtype,
            tokenizer_mode="mistral",
            load_format="mistral",
            config_format="mistral",
    ) as mistral_format_model:
        mistral_format_outputs = mistral_format_model.generate_greedy_logprobs(
            example_prompts, max_tokens, num_logprobs)

    with vllm_runner(
            model,
            dtype=dtype,
            tokenizer_mode="auto",
            load_format="safetensors",
            config_format="hf",
    ) as hf_format_model:
        hf_format_outputs = hf_format_model.generate_greedy_logprobs(
            example_prompts, max_tokens, num_logprobs)

    check_logprobs_close(
        outputs_0_lst=hf_format_outputs,
        outputs_1_lst=mistral_format_outputs,
        name_0="hf",
        name_1="mistral",
    )


@pytest.mark.parametrize("model", MISTRAL_FORMAT_MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
def test_mistral_symbolic_languages(
    vllm_runner,
    model: str,
    dtype: str,
) -> None:
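    """Chat with non-Latin-script prompts and assert that the decoded
    output contains no U+FFFD replacement characters."""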
    with vllm_runner(model,
                     dtype=dtype,
                     max_model_len=8192,
                     tokenizer_mode="mistral",
                     config_format="mistral",
                     load_format="mistral") as vllm_model:
        for prompt in SYMBOLIC_LANG_PROMPTS:
            msg = {"role": "user", "content": prompt}
            outputs = vllm_model.model.chat([msg],
                                            sampling_params=SAMPLING_PARAMS)
            assert "�" not in outputs[0].outputs[0].text.strip()


@pytest.mark.skip("RE-ENABLE: test is currently failing on main.")
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("model",
                         MISTRAL_FORMAT_MODELS)  # v1 can't do func calling
def test_mistral_function_calling(
    vllm_runner,
    model: str,
    dtype: str,
) -> None:
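    """Replay a tool-calling conversation and check that MistralToolParser
    extracts the expected get_current_weather call from the model output."""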
    with vllm_runner(model,
                     dtype=dtype,
                     tokenizer_mode="mistral",
                     config_format="mistral",
                     load_format="mistral") as vllm_model:

        msgs = copy.deepcopy(MSGS)
        outputs = vllm_model.model.chat(msgs,
                                        tools=TOOLS,
                                        sampling_params=SAMPLING_PARAMS)

        tokenizer = vllm_model.model.get_tokenizer()
        tool_parser = MistralToolParser(tokenizer)

        model_output = outputs[0].outputs[0].text.strip()
        assert model_output.startswith(tool_parser.bot_token), model_output
        parsed_message = tool_parser.extract_tool_calls(model_output, None)

        assert parsed_message.tools_called
        assert parsed_message.tool_calls[0].id == "0UAqFzWsD"
        assert parsed_message.tool_calls[
            0].function.name == "get_current_weather"
        assert parsed_message.tool_calls[
            0].function.arguments == '{"city": "Dallas", "state": "TX", "unit": "fahrenheit"}'  # noqa
        assert parsed_message.content is None


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("guided_backend",
                         ["outlines", "lm-format-enforcer", "xgrammar"])
def test_mistral_guided_decoding(
    vllm_runner,
    model: str,
    guided_backend: str,
) -> None:
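    """Generate with guided JSON decoding and validate the output against
    SAMPLE_JSON_SCHEMA."""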
    with vllm_runner(model, dtype='bfloat16',
                     tokenizer_mode="mistral") as vllm_model:

        guided_decoding = GuidedDecodingParams(json=SAMPLE_JSON_SCHEMA,
                                               backend=guided_backend)
        params = SamplingParams(max_tokens=512,
                                temperature=0.7,
                                guided_decoding=guided_decoding)

        messages = [{
            "role": "system",
            "content": "you are a helpful assistant"
        }, {
            "role":
            "user",
            "content":
            f"Give an example JSON for an employee profile that "
            f"fits this schema: {SAMPLE_JSON_SCHEMA}"
        }]
        outputs = vllm_model.model.chat(messages, sampling_params=params)

    generated_text = outputs[0].outputs[0].text
    json_response = json.loads(generated_text)
    assert outputs is not None

    try:
        jsonschema.validate(instance=json_response,
                            schema=SAMPLE_JSON_SCHEMA)
    except jsonschema.exceptions.ValidationError:
        pytest.fail("Generated response is not valid with JSON schema")