[CI] Add test case with JSON schema using references + use xgrammar by default with OpenAI parse (#10935)
Signed-off-by: mgoin <michael@neuralmagic.com>
parent 35bae114a8
commit 0064f697d3
@@ -100,6 +100,45 @@ def sample_complex_json_schema():
     }
 
 
+@pytest.fixture
+def sample_definition_json_schema():
+    return {
+        '$defs': {
+            'Step': {
+                'properties': {
+                    'explanation': {
+                        'title': 'Explanation',
+                        'type': 'string'
+                    },
+                    'output': {
+                        'title': 'Output',
+                        'type': 'string'
+                    }
+                },
+                'required': ['explanation', 'output'],
+                'title': 'Step',
+                'type': 'object'
+            }
+        },
+        'properties': {
+            'steps': {
+                'items': {
+                    '$ref': '#/$defs/Step'
+                },
+                'title': 'Steps',
+                'type': 'array'
+            },
+            'final_answer': {
+                'title': 'Final Answer',
+                'type': 'string'
+            }
+        },
+        'required': ['steps', 'final_answer'],
+        'title': 'MathReasoning',
+        'type': 'object'
+    }
+
+
 @pytest.fixture
 def sample_guided_choice():
     return [
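As a side note for reviewers, here is a minimal sketch (not part of the diff) of what the new fixture constrains: jsonschema resolves the '#/$defs/Step' pointer against '$defs' during validation, so every entry in 'steps' must carry both 'explanation' and 'output'. The instance below is invented purely for illustration.

import jsonschema

# Same shape as the sample_definition_json_schema fixture added above;
# items of "steps" are validated against the "#/$defs/Step" definition.
schema = {
    '$defs': {
        'Step': {
            'properties': {
                'explanation': {'title': 'Explanation', 'type': 'string'},
                'output': {'title': 'Output', 'type': 'string'}
            },
            'required': ['explanation', 'output'],
            'title': 'Step',
            'type': 'object'
        }
    },
    'properties': {
        'steps': {
            'items': {'$ref': '#/$defs/Step'},
            'title': 'Steps',
            'type': 'array'
        },
        'final_answer': {'title': 'Final Answer', 'type': 'string'}
    },
    'required': ['steps', 'final_answer'],
    'title': 'MathReasoning',
    'type': 'object'
}

# Hypothetical instance, made up for illustration only.
instance = {
    'steps': [
        {'explanation': 'Subtract 7 from both sides.', 'output': '8x = -30'},
        {'explanation': 'Divide both sides by 8.', 'output': 'x = -15/4'},
    ],
    'final_answer': 'x = -15/4',
}

# Raises jsonschema.exceptions.ValidationError if the instance does not conform.
jsonschema.validate(instance=instance, schema=schema)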
@@ -104,6 +104,34 @@ def test_guided_complex_json_completion(sample_complex_json_schema, llm):
                             schema=sample_complex_json_schema)
 
 
+@pytest.mark.skip_global_cleanup
+def test_guided_definition_json_completion(sample_definition_json_schema, llm):
+    sampling_params = SamplingParams(temperature=1.0,
+                                     max_tokens=1000,
+                                     guided_decoding=GuidedDecodingParams(
+                                         json=sample_definition_json_schema))
+    outputs = llm.generate(prompts=[
+        f"Give an example JSON for solving 8x + 7 = -23 "
+        f"that fits this schema: {sample_definition_json_schema}"
+    ] * 2,
+                           sampling_params=sampling_params,
+                           use_tqdm=True)
+
+    assert outputs is not None
+
+    for output in outputs:
+        assert output is not None
+        assert isinstance(output, RequestOutput)
+        prompt = output.prompt
+
+        generated_text = output.outputs[0].text
+        assert generated_text is not None
+        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+        output_json = json.loads(generated_text)
+        jsonschema.validate(instance=output_json,
+                            schema=sample_definition_json_schema)
+
+
 @pytest.mark.skip_global_cleanup
 def test_guided_choice_completion(sample_guided_choice, llm):
     sampling_params = SamplingParams(
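The new test drives the in-process LLM entrypoint directly. For context, here is a rough sketch (not part of the diff) of the same reference-bearing-schema scenario against a running vLLM OpenAI-compatible server, using the guided_json extra-body field; the Pydantic models, server URL, and model name are assumptions for illustration.

import json

import jsonschema
from openai import OpenAI
from pydantic import BaseModel


# Pydantic models like these are a common source of schemas with references:
# model_json_schema() emits a "$defs" section plus "#/$defs/Step" pointers.
class Step(BaseModel):
    explanation: str
    output: str


class MathReasoning(BaseModel):
    steps: list[Step]
    final_answer: str


schema = MathReasoning.model_json_schema()

# Placeholders for illustration: point at your own server and model.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

completion = client.chat.completions.create(
    model="Qwen/Qwen2.5-1.5B-Instruct",
    messages=[{
        "role": "user",
        "content": f"Give an example JSON for solving 8x + 7 = -23 "
                   f"that fits this schema: {schema}"
    }],
    # vLLM-specific extra field requesting schema-constrained decoding.
    extra_body={"guided_json": schema},
)

# The constrained output should parse and validate against the schema.
output_json = json.loads(completion.choices[0].message.content)
jsonschema.validate(instance=output_json, schema=schema)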
@@ -387,7 +387,7 @@ class ChatCompletionRequest(OpenAIBaseModel):
                 assert json_schema is not None
                 self.guided_json = json_schema.json_schema
                 if self.guided_decoding_backend is None:
-                    self.guided_decoding_backend = "lm-format-enforcer"
+                    self.guided_decoding_backend = "xgrammar"
 
         guided_decoding = GuidedDecodingParams.from_optional(
             json=self._get_guided_json_from_tool() or self.guided_json,
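For context on the one-line change above: when an OpenAI-style structured-output request arrives with a json_schema response_format (for example via the OpenAI client's beta.chat.completions.parse) and no guided-decoding backend was requested explicitly, the server now falls back to xgrammar instead of lm-format-enforcer. A rough sketch of such a request follows, assuming a recent openai client; the server URL and model name are placeholders, not part of this diff.

from openai import OpenAI
from pydantic import BaseModel


class Step(BaseModel):
    explanation: str
    output: str


class MathReasoning(BaseModel):
    steps: list[Step]
    final_answer: str


# Placeholders for illustration: point at your own server and model.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

# parse() sends MathReasoning as a json_schema response_format; on the server
# this populates guided_json, and with no backend specified the request now
# defaults to xgrammar rather than lm-format-enforcer.
completion = client.beta.chat.completions.parse(
    model="Qwen/Qwen2.5-1.5B-Instruct",
    messages=[{"role": "user", "content": "Solve 8x + 7 = -23 step by step."}],
    response_format=MathReasoning,
)

print(completion.choices[0].message.parsed)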