# imports for guided decoding tests
import json
import re
from typing import List

import jsonschema
import openai  # use the official client for correctness check
import pytest
import torch
from openai import BadRequestError

from ...utils import RemoteOpenAIServer
from .test_completion import zephyr_lora_added_tokens_files  # noqa: F401
from .test_completion import zephyr_lora_files  # noqa: F401

# any model with a chat template should work here
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
# technically this needs Mistral-7B-v0.1 as base, but we're not testing
# generation quality here
LORA_NAME = "alpindale/zephyr-7b-beta-lora"


@pytest.fixture(scope="module")
def server(zephyr_lora_files, zephyr_lora_added_tokens_files):  # noqa: F811
    args = [
        # use half precision for speed and memory savings in CI environment
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "8192",
        "--enforce-eager",
        # lora config below
        "--enable-lora",
        "--lora-modules",
        f"zephyr-lora={zephyr_lora_files}",
        f"zephyr-lora2={zephyr_lora_added_tokens_files}",
        "--max-lora-rank",
        "64",
        "--max-cpu-loras",
        "2",
        "--max-num-seqs",
        "128",
    ]

    with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
        yield remote_server


@pytest.fixture(scope="module")
def client(server):
    return server.get_async_client()


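# Passing logprobs=False should leave the logprobs field of the returned
# choice unset.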
@pytest.mark.asyncio
@pytest.mark.parametrize(
    # first test base model, then test loras
    "model_name",
    [MODEL_NAME, "zephyr-lora", "zephyr-lora2"],
)
async def test_no_logprobs_chat(client: openai.AsyncOpenAI, model_name: str):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "what is 1+1?"
    }]

    chat_completion = await client.chat.completions.create(model=model_name,
                                                           messages=messages,
                                                           max_tokens=5,
                                                           temperature=0.0,
                                                           logprobs=False)

    choice = chat_completion.choices[0]
    assert choice.logprobs is None


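# logprobs=True with top_logprobs=0 should still return per-token logprobs,
# but with an empty top_logprobs list.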
@pytest.mark.asyncio
@pytest.mark.parametrize(
    # just test 1 lora hereafter
    "model_name",
    [MODEL_NAME, "zephyr-lora"],
)
async def test_zero_logprobs_chat(client: openai.AsyncOpenAI, model_name: str):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "what is 1+1?"
    }]

    chat_completion = await client.chat.completions.create(model=model_name,
                                                           messages=messages,
                                                           max_tokens=5,
                                                           temperature=0.0,
                                                           logprobs=True,
                                                           top_logprobs=0)

    choice = chat_completion.choices[0]
    assert choice.logprobs is not None
    assert choice.logprobs.content is not None
    assert len(choice.logprobs.content[0].top_logprobs) == 0


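# top_logprobs=5 should return five alternatives per position (checked here
# on the first returned token).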
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME, "zephyr-lora"],
)
async def test_some_logprobs_chat(client: openai.AsyncOpenAI, model_name: str):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "what is 1+1?"
    }]

    chat_completion = await client.chat.completions.create(model=model_name,
                                                           messages=messages,
                                                           max_tokens=5,
                                                           temperature=0.0,
                                                           logprobs=True,
                                                           top_logprobs=5)

    choice = chat_completion.choices[0]
    assert choice.logprobs is not None
    assert choice.logprobs.content is not None
    assert len(choice.logprobs.content[0].top_logprobs) == 5


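# Requests exceeding the server's max_logprobs limit must fail cleanly, and
# the server should keep serving requests afterwards.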
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME, "zephyr-lora"],
)
async def test_too_many_chat_logprobs(client: openai.AsyncOpenAI,
                                      model_name: str):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "what is 1+1?"
    }]

    # Default max_logprobs is 20, so this should raise an error
    with pytest.raises((openai.BadRequestError, openai.APIError)):
        stream = await client.chat.completions.create(model=model_name,
                                                      messages=messages,
                                                      max_tokens=10,
                                                      logprobs=True,
                                                      top_logprobs=21,
                                                      stream=True)
        async for chunk in stream:
            ...

    with pytest.raises(openai.BadRequestError):
        await client.chat.completions.create(model=model_name,
                                             messages=messages,
                                             max_tokens=10,
                                             logprobs=True,
                                             top_logprobs=30,
                                             stream=False)

    # the server should still work afterwards
    chat_completion = await client.chat.completions.create(model=model_name,
                                                           messages=messages,
                                                           max_tokens=10,
                                                           stream=False)
    message = chat_completion.choices[0].message
    assert message.content is not None and len(message.content) >= 0


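# End-to-end sanity check: one chat turn with logprobs, then a multi-turn
# follow-up on the same conversation.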
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME, "zephyr-lora"],
)
async def test_single_chat_session(client: openai.AsyncOpenAI,
                                   model_name: str):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "what is 1+1?"
    }]

    # test single completion
    chat_completion = await client.chat.completions.create(model=model_name,
                                                           messages=messages,
                                                           max_tokens=10,
                                                           logprobs=True,
                                                           top_logprobs=5)
    assert chat_completion.id is not None
    assert len(chat_completion.choices) == 1

    choice = chat_completion.choices[0]
    assert choice.finish_reason == "length"
    assert chat_completion.usage == openai.types.CompletionUsage(
        completion_tokens=10, prompt_tokens=37, total_tokens=47)

    message = choice.message
    assert message.content is not None and len(message.content) >= 10
    assert message.role == "assistant"
    messages.append({"role": "assistant", "content": message.content})

    # test multi-turn dialogue
    messages.append({"role": "user", "content": "express your result in json"})
    chat_completion = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
    )
    message = chat_completion.choices[0].message
    assert message.content is not None and len(message.content) >= 0


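# The concatenated streamed deltas must reproduce the non-streamed output for
# the same greedy request.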
@pytest.mark.asyncio
@pytest.mark.parametrize(
    # just test 1 lora hereafter
    "model_name",
    [MODEL_NAME, "zephyr-lora"],
)
async def test_chat_streaming(client: openai.AsyncOpenAI, model_name: str):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "what is 1+1?"
    }]

    # test single completion
    chat_completion = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
        temperature=0.0,
    )
    output = chat_completion.choices[0].message.content
    stop_reason = chat_completion.choices[0].finish_reason

    # test streaming
    stream = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
        temperature=0.0,
        stream=True,
    )
    chunks: List[str] = []
    finish_reason_count = 0
    async for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.role:
            assert delta.role == "assistant"
        if delta.content:
            chunks.append(delta.content)
        if chunk.choices[0].finish_reason is not None:
            finish_reason_count += 1
    # finish reason should only return in last block
    assert finish_reason_count == 1
    assert chunk.choices[0].finish_reason == stop_reason
    assert delta.content
    assert "".join(chunks) == output


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    ["HuggingFaceH4/zephyr-7b-beta", "zephyr-lora"],
)
async def test_chat_completion_stream_options(client: openai.AsyncOpenAI,
                                              model_name: str):
    messages = [{
        "role": "system",
        "content": "You are a helpful assistant."
    }, {
        "role": "user",
        "content": "What is the capital of France?"
    }]

    # Test stream=True, stream_options={"include_usage": False}
    stream = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
        temperature=0.0,
        stream=True,
        stream_options={"include_usage": False})
    async for chunk in stream:
        assert chunk.usage is None

    # Test stream=True, stream_options={"include_usage": True,
    #                                   "continuous_usage_stats": False}
    stream = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
        temperature=0.0,
        stream=True,
        stream_options={
            "include_usage": True,
            "continuous_usage_stats": False
        })
    async for chunk in stream:
        if chunk.choices[0].finish_reason is None:
            assert chunk.usage is None
        else:
            # the usage-only chunk follows the chunk that carries the
            # finish_reason, so pull it off the stream explicitly
            assert chunk.usage is None
            final_chunk = await stream.__anext__()
            assert final_chunk.usage is not None
            assert final_chunk.usage.prompt_tokens > 0
            assert final_chunk.usage.completion_tokens > 0
            assert final_chunk.usage.total_tokens == (
                final_chunk.usage.prompt_tokens +
                final_chunk.usage.completion_tokens)
            assert final_chunk.choices == []

    # Test stream=False, stream_options={"include_usage": None}
    with pytest.raises(BadRequestError):
        await client.chat.completions.create(
            model=model_name,
            messages=messages,
            max_tokens=10,
            temperature=0.0,
            stream=False,
            stream_options={"include_usage": None})

    # Test stream=False, stream_options={"include_usage": True}
    with pytest.raises(BadRequestError):
        await client.chat.completions.create(
            model=model_name,
            messages=messages,
            max_tokens=10,
            temperature=0.0,
            stream=False,
            stream_options={"include_usage": True})

    # Test stream=True, stream_options={"include_usage": True,
    #                                   "continuous_usage_stats": True}
    stream = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
        temperature=0.0,
        stream=True,
        stream_options={
            "include_usage": True,
            "continuous_usage_stats": True
        },
    )
    async for chunk in stream:
        assert chunk.usage.prompt_tokens >= 0
        assert chunk.usage.completion_tokens >= 0
        assert chunk.usage.total_tokens == (chunk.usage.prompt_tokens +
                                            chunk.usage.completion_tokens)


# NOTE: Not sure why, but when I place this after `test_guided_regex_chat`
# (i.e. using the same ordering as in the Completions API tests), the test
# will fail on the second `guided_decoding_backend` even when I swap their
# order
# (ref: https://github.com/aphrodite-project/aphrodite/pull/5526#issuecomment-2173772256)
@pytest.mark.asyncio
@pytest.mark.parametrize("guided_decoding_backend",
                         ["outlines", "lm-format-enforcer"])
async def test_guided_choice_chat(client: openai.AsyncOpenAI,
                                  guided_decoding_backend: str,
                                  sample_guided_choice):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "The best language for type-safe systems programming is "
    }]
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=10,
        extra_body=dict(guided_choice=sample_guided_choice,
                        guided_decoding_backend=guided_decoding_backend))
    choice1 = chat_completion.choices[0].message.content
    assert choice1 in sample_guided_choice

    messages.append({"role": "assistant", "content": choice1})
    messages.append({
        "role": "user",
        "content": "I disagree, pick another one"
    })
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=10,
        extra_body=dict(guided_choice=sample_guided_choice,
                        guided_decoding_backend=guided_decoding_backend))
    choice2 = chat_completion.choices[0].message.content
    assert choice2 in sample_guided_choice
    assert choice1 != choice2


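# Guided JSON decoding should produce schema-valid output on both turns, with
# different values the second time.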
@pytest.mark.asyncio
@pytest.mark.parametrize("guided_decoding_backend",
                         ["outlines", "lm-format-enforcer"])
async def test_guided_json_chat(client: openai.AsyncOpenAI,
                                guided_decoding_backend: str,
                                sample_json_schema):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": f"Give an example JSON for an employee profile that "
        f"fits this schema: {sample_json_schema}"
    }]
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=1000,
        extra_body=dict(guided_json=sample_json_schema,
                        guided_decoding_backend=guided_decoding_backend))
    message = chat_completion.choices[0].message
    assert message.content is not None
    json1 = json.loads(message.content)
    jsonschema.validate(instance=json1, schema=sample_json_schema)

    messages.append({"role": "assistant", "content": message.content})
    messages.append({
        "role": "user",
        "content": "Give me another one with a different name and age"
    })
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=1000,
        extra_body=dict(guided_json=sample_json_schema,
                        guided_decoding_backend=guided_decoding_backend))
    message = chat_completion.choices[0].message
    assert message.content is not None
    json2 = json.loads(message.content)
    jsonschema.validate(instance=json2, schema=sample_json_schema)
    assert json1["name"] != json2["name"]
    assert json1["age"] != json2["age"]


@pytest.mark.asyncio
@pytest.mark.parametrize("guided_decoding_backend",
                         ["outlines", "lm-format-enforcer"])
async def test_guided_regex_chat(client: openai.AsyncOpenAI,
                                 guided_decoding_backend: str, sample_regex):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": f"Give an example IP address with this regex: {sample_regex}"
    }]
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=20,
        extra_body=dict(guided_regex=sample_regex,
                        guided_decoding_backend=guided_decoding_backend))
    ip1 = chat_completion.choices[0].message.content
    assert ip1 is not None
    assert re.fullmatch(sample_regex, ip1) is not None

    messages.append({"role": "assistant", "content": ip1})
    messages.append({"role": "user", "content": "Give me a different one"})
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=20,
        extra_body=dict(guided_regex=sample_regex,
                        guided_decoding_backend=guided_decoding_backend))
    ip2 = chat_completion.choices[0].message.content
    assert ip2 is not None
    assert re.fullmatch(sample_regex, ip2) is not None
    assert ip1 != ip2


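# A guided_regex value of the wrong type (a dict instead of a string) must be
# rejected as a bad request.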
@pytest.mark.asyncio
async def test_guided_decoding_type_error(client: openai.AsyncOpenAI):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "The best language for type-safe systems programming is "
    }]

    with pytest.raises(openai.BadRequestError):
        _ = await client.chat.completions.create(model=MODEL_NAME,
                                                 messages=messages,
                                                 extra_body=dict(guided_regex={
                                                     1: "Python",
                                                     2: "C++"
                                                 }))


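# Logprobs should still be returned, and stay above the OpenAI floor, when
# guided choice decoding is active.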
@pytest.mark.asyncio
@pytest.mark.parametrize("guided_decoding_backend",
                         ["outlines", "lm-format-enforcer"])
async def test_guided_choice_chat_logprobs(client: openai.AsyncOpenAI,
                                           guided_decoding_backend: str,
                                           sample_guided_choice):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": "The best language for type-safe systems programming is "
    }]
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=10,
        logprobs=True,
        top_logprobs=5,
        extra_body=dict(guided_choice=sample_guided_choice,
                        guided_decoding_backend=guided_decoding_backend))

    assert chat_completion.choices[0].logprobs is not None
    assert chat_completion.choices[0].logprobs.content is not None
    top_logprobs = chat_completion.choices[0].logprobs.content[0].top_logprobs

    # -9999.0 is the minimum logprob returned by OpenAI
    for item in top_logprobs:
        assert item.logprob >= -9999.0, f"Failed (top_logprobs={top_logprobs})"


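# Forcing a named tool via tool_choice should yield schema-valid tool-call
# arguments in both non-streaming and streaming modes.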
@pytest.mark.asyncio
@pytest.mark.parametrize("guided_decoding_backend",
                         ["outlines", "lm-format-enforcer"])
async def test_named_tool_use(client: openai.AsyncOpenAI,
                              guided_decoding_backend: str,
                              sample_json_schema):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": f"Give an example JSON for an employee profile that "
        f"fits this schema: {sample_json_schema}"
    }]

    # non-streaming
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=1000,
        tools=[{
            "type": "function",
            "function": {
                "name": "dummy_function_name",
                "description": "This is a dummy function",
                "parameters": sample_json_schema
            }
        }],
        tool_choice={
            "type": "function",
            "function": {
                "name": "dummy_function_name"
            }
        })
    message = chat_completion.choices[0].message
    assert len(message.content) == 0
    json_string = message.tool_calls[0].function.arguments
    json1 = json.loads(json_string)
    jsonschema.validate(instance=json1, schema=sample_json_schema)

    messages.append({"role": "assistant", "content": json_string})
    messages.append({
        "role": "user",
        "content": "Give me another one with a different name and age"
    })

    # streaming
    stream = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_tokens=1000,
        tools=[{
            "type": "function",
            "function": {
                "name": "dummy_function_name",
                "description": "This is a dummy function",
                "parameters": sample_json_schema
            }
        }],
        tool_choice={
            "type": "function",
            "function": {
                "name": "dummy_function_name"
            }
        },
        stream=True)

    output = []
    finish_reason_count = 0
    async for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.role:
            assert delta.role == "assistant"
        assert delta.content is None or len(delta.content) == 0
        if delta.tool_calls:
            output.append(delta.tool_calls[0].function.arguments)
        if chunk.choices[0].finish_reason is not None:
            finish_reason_count += 1
    # finish reason should only return in last block
    assert finish_reason_count == 1
    json2 = json.loads("".join(output))
    jsonschema.validate(instance=json2, schema=sample_json_schema)
    assert json1["name"] != json2["name"]
    assert json1["age"] != json2["age"]


@pytest.mark.asyncio
@pytest.mark.parametrize("guided_decoding_backend", ["outlines"])
async def test_required_tool_use_not_yet_supported(
        client: openai.AsyncOpenAI, guided_decoding_backend: str,
        sample_json_schema):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": f"Give an example JSON for an employee profile that "
        f"fits this schema: {sample_json_schema}"
    }]

    with pytest.raises(openai.BadRequestError):
        await client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=1000,
            tools=[{
                "type": "function",
                "function": {
                    "name": "dummy_function_name",
                    "description": "This is a dummy function",
                    "parameters": sample_json_schema
                }
            }],
            tool_choice="required")

    with pytest.raises(openai.BadRequestError):
        await client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=1000,
            tools=[{
                "type": "function",
                "function": {
                    "name": "dummy_function_name",
                    "description": "This is a dummy function",
                    "parameters": sample_json_schema
                }
            }],
            tool_choice="auto")


@pytest.mark.asyncio
@pytest.mark.parametrize("guided_decoding_backend", ["outlines"])
async def test_inconsistent_tool_choice_and_tools(client: openai.AsyncOpenAI,
                                                  guided_decoding_backend: str,
                                                  sample_json_schema):
    messages = [{
        "role": "system",
        "content": "you are a helpful assistant"
    }, {
        "role": "user",
        "content": f"Give an example JSON for an employee profile that "
        f"fits this schema: {sample_json_schema}"
    }]

    with pytest.raises(openai.BadRequestError):
        await client.chat.completions.create(model=MODEL_NAME,
                                             messages=messages,
                                             max_tokens=1000,
                                             tool_choice={
                                                 "type": "function",
                                                 "function": {
                                                     "name":
                                                     "dummy_function_name"
                                                 }
                                             })

    with pytest.raises(openai.BadRequestError):
        await client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=1000,
            tools=[{
                "type": "function",
                "function": {
                    "name": "dummy_function_name",
                    "description": "This is a dummy function",
                    "parameters": sample_json_schema
                }
            }],
            tool_choice={
                "type": "function",
                "function": {
                    "name": "nondefined_function_name"
                }
            })


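# response_format={"type": "json_object"} should always produce parseable
# JSON output.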
@pytest.mark.asyncio
async def test_response_format_json_object(client: openai.AsyncOpenAI):
    for _ in range(2):
        resp = await client.chat.completions.create(
            model=MODEL_NAME,
            messages=[{
                "role": "user",
                "content": ('what is 1+1? please respond with a JSON object, '
                            'the format is {"result": 2}')
            }],
            response_format={"type": "json_object"})

        content = resp.choices[0].message.content
        assert content is not None

        loaded = json.loads(content)
        assert loaded == {"result": 2}, loaded


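# Content given as a list of typed parts should be accepted just like plain
# string content.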
@pytest.mark.asyncio
async def test_complex_message_content(client: openai.AsyncOpenAI):
    resp = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=[{
            "role": "user",
            "content": [{
                "type": "text",
                "text":
                "what is 1+1? please provide the result without any other text."
            }]
        }],
        temperature=0,
        seed=0)
    content = resp.choices[0].message.content
    assert content == "2"


@pytest.mark.asyncio
async def test_custom_role(client: openai.AsyncOpenAI):
    # Not sure how the model handles custom roles so we just check that
    # both string and complex message content are handled in the same way
    resp1 = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=[{
            "role": "my-custom-role",
            "content": "what is 1+1?",
        }],  # type: ignore
        temperature=0,
        seed=0)

    resp2 = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=[{
            "role": "my-custom-role",
            "content": [{
                "type": "text",
                "text": "what is 1+1?"
            }]
        }],  # type: ignore
        temperature=0,
        seed=0)

    content1 = resp1.choices[0].message.content
    content2 = resp2.choices[0].message.content
    assert content1 == content2


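# Seeds outside the signed 64-bit range should be rejected by request
# validation.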
@pytest.mark.asyncio
async def test_long_seed(client: openai.AsyncOpenAI):
    for seed in [
            torch.iinfo(torch.long).min - 1,
            torch.iinfo(torch.long).max + 1
    ]:
        with pytest.raises(BadRequestError) as exc_info:
            await client.chat.completions.create(
                model=MODEL_NAME,
                messages=[{
                    "role": "system",
                    "content": "You are a helpful assistant.",
                }],
                temperature=0,
                seed=seed)

        assert ("greater_than_equal" in exc_info.value.message
                or "less_than_equal" in exc_info.value.message)


@pytest.mark.asyncio
async def test_response_format_json_schema(client: openai.AsyncOpenAI):
    for _ in range(2):
        resp = await client.chat.completions.create(
            model=MODEL_NAME,
            messages=[{
                "role": "user",
                "content": ('what is 1+1? please respond with a JSON object, '
                            'the format is {"result": 2}')
            }],
            response_format={
                "type": "json_schema",
                "json_schema": {
                    "name": "foo_test",
                    "schema": {
                        "type": "object",
                        "properties": {
                            "result": {
                                "type": "integer"
                            },
                        },
                    },
                }
            })

        content = resp.choices[0].message.content
        assert content is not None

        loaded = json.loads(content)
        assert loaded == {"result": 2}, loaded