I extracted a Python function from other code to address a knowledge and documentation shortcoming; the code itself is readable as documentation.
Or you can just use the function. It builds the correct structured-output format container for a JSON schema for either:
- the Responses API `text` container (for the request's `text` parameter), or
- the Chat Completions `response_format` container (with `for_chat_completions=True`).
To get a strict structured-output parameter, you need only:
- your JSON schema,
- a name for it,
- an optional but recommended description of when it is useful.
This takes some of the worry off your back about how to contain and pass structured response schemas. You just need to produce the actual “schema”; the rest — building the “container” specific to the endpoint — is done for you.
(expand or copy to get the full impact)
'''schema container helper, with API call demo'''
from typing import Any
def build_schema_parameter(
#self, # for within a class
*,
use_json_schema: bool = True,
schema_name: str,
schema_description: str | None = None,
schema_strict: bool = True,
schema: dict[str, Any],
verbosity: str | None = None,
for_chat_completions: bool = False,
) -> dict[str, Any]:
"""
Build the correct structured response output format for JSON schema for either:
- the Responses API `text` container (for the request's `text` parameter), or
- the Chat Completions `response_format` container, using `for_chat_completions=True`),
with a plain text fallback by a switch. (For `json_object` type, no method is provided).
Behavior:
- When `use_json_schema=True`, emits a JSON Schema format container; otherwise emits a
plain `"type": "text"` container appropriate to the chosen endpoint.
- `schema_description` is used only by the Responses API and is ignored for Chat Completions.
(you add a description to cc schema itself, which responses tolerates)
- `verbosity` is applied only to the Responses `text` object and is **not** propagated to
Chat Completions (that endpoint has its own independent parameter for verbosity).
- `schema_name` should be short and match `[A-Za-z0-9_-]{1,64}` - the AI emits this.
Args (passed to API endpoint):
schema_name: Name for the JSON schema format container.
schema: The actual JSON Schema object describing the output shape to AI. Must be JSON-serializable.
schema_description: Description for the JSON schema container (optional).
schema_strict: Whether the schema is enforced strictly by the model. Defaults to True in this helper.
verbosity: Optional verbosity string; included alongside the `format` block for the Responses
`text` object only. It is **not** passed to Chat Completions (use that API's different parameter).
Args (for control):
use_json_schema: When True (default), emit a JSON Schema format container; when False,
emit the plain text format for the chosen endpoint. Quick off switch.
for_chat_completions: When True, build and return the Chat Completions `response_format`
object; otherwise (default), build and return the Responses `text` object.
Returns:
dict[str, Any]: A dict suitable to place directly under:
- `text` (Responses API) when `for_chat_completions=False`, or
- `response_format` (Chat Completions) when `for_chat_completions=True`.
When `use_json_schema=False`, the return is the plain text container for the chosen endpoint.
Raises:
ValueError: If required parameters are missing/invalid when `use_json_schema=True`, or if
`schema` is not JSON-serializable.
"""
...
import json # local import by design
if use_json_schema:
if not isinstance(schema, dict):
raise ValueError("`schema` must be a dict when `use_json_schema=True`.")
if not isinstance(schema_name, str) or not schema_name.strip():
raise ValueError("`schema_name` must be a non-empty string.")
name_length = len(schema_name)
if name_length > 64:
raise ValueError(f"`schema_name` must be under 64 char; you tried {name_length}.")
if isinstance(schema_description, str) and not schema_description.strip():
raise ValueError("`schema_description` must be a non-empty string.")
if not isinstance(schema_strict, bool):
raise ValueError("`schema_strict` must be a bool.")
try:
json.dumps(schema)
except (TypeError, ValueError) as exc:
raise ValueError(f"`schema` is not JSON-serializable: {exc}") from exc
# this is the shape to be placed in 'text' of the Responses API
text_obj: dict[str, Any] = {
"format": {
"type": "json_schema",
"name": schema_name,
"description": schema_description,
"strict": schema_strict,
"schema": schema,
}
}
# Chat Completions equivalent container for 'response_format'
response_format_obj: dict[str, Any] = {
"type": "json_schema",
"json_schema": {
"name": schema_name,
"description": schema_description,
"strict": schema_strict,
"schema": schema,
},
}
else:
text_obj = {"format": {"type": "text"}}
response_format_obj = {"type": "text"}
# 'verbosity' only applies to the Responses 'text' object
if verbosity is not None:
text_obj["verbosity"] = str(verbosity)
return response_format_obj if for_chat_completions else text_obj
### END def build_schema_parameter ###
# --------------------------------
# USAGE DEMO
# --------------------------------
def schema_usage_demo(run_api_calls: bool = True):
    """Demonstrate build_schema_parameter() against both OpenAI endpoints.

    Prints the request bodies it builds and, when `run_api_calls` is True
    (the default), sends them — this requires the `openai` SDK and an
    OPENAI_API_KEY environment variable.

    Args:
        run_api_calls: Set False to only print the request bodies without
            making any network calls.
    """
    from typing import Any
    import json

    # Example JSON Schema for structured output, without the "container"
    schema: dict[str, Any] = {
        "type": "object",
        "properties": {
            "response_to_user": {
                "type": "string",
                "description": "The response message intended for the user.",
            },
            "five_word_title": {
                "type": "string",
                "description": "A maximum 5-word chat title for this turn.",
            },
        },
        "required": ["response_to_user", "five_word_title"],
        "additionalProperties": False,
    }

    text_api_parameter = build_schema_parameter(
        schema_name="assistant_response",
        schema=schema,
        schema_description="Response with user-facing conversational message and 5-word UI title",  # optional, best practice
        # use_json_schema=True,       # default is already true
        # schema_strict=True,         # default is already strict
        # for_chat_completions=False, # default False is for the Responses API
        verbosity="low",  # optional; applied only to Responses API; use for gpt-5 only
    )

    # Minimal request body for POST /v1/responses
    request_payload: dict[str, Any] = {
        "model": "gpt-5-mini",
        "input": "Ping!",
        "text": text_api_parameter,
        "store": False,
    }
    print(f"Requests body:\n{json.dumps(request_payload, indent=2)}")

    # Demonstration call; requires openai SDK and OPENAI_API_KEY env var
    if run_api_calls:
        from openai import Client

        client = Client()
        response = client.responses.create(**request_payload)
        print(response.output_text, end="\n---\n")

    # Even more minimal request body and demo call for POST /v1/chat/completions
    request_payload = {
        "model": "gpt-5-nano",
        "messages": [{"role": "user", "content": "Ping!"}],
        "response_format": build_schema_parameter(
            schema_name="assistant_response",
            schema=schema,  # the earlier schema
            for_chat_completions=True,
        ),
        "store": False,
    }
    print(f"Chat Completions body:\n{json.dumps(request_payload, indent=2)}")

    if run_api_calls:
        from openai import Client

        client = Client()
        result = client.chat.completions.create(**request_payload)
        print(f"Chat Completions said:\n{result.choices[0].message.content}")


if __name__ == "__main__":
    schema_usage_demo()
This should also be straightforward for AI machine translation into other programming languages.
Using Pydantic yourself
If manually creating a schema is still overwhelming, you can also use Pydantic to create a schema - the same way that OpenAI’s SDK can consume a Pydantic BaseModel as a response format input with its “parsed” method. And actually see what is being sent.
You only need to ensure the “strict” requirements are added yourself…
from typing import Any, Dict
import json
from pydantic import BaseModel, Field, ConfigDict # Pydantic 2.5
class AssistantResponse(BaseModel):
    """Structured assistant reply: a user-facing message plus a short chat title.

    The model is configured so that its generated JSON Schema meets the
    "strict" structured-output requirements: extra keys are forbidden and
    both fields are required.
    """

    # Forbid extra keys -> "additionalProperties": false in JSON Schema
    model_config = ConfigDict(extra="forbid")

    # No defaults -> both fields are "required" in JSON Schema
    response_to_user: str = Field(description="The response message intended for the user.")
    five_word_title: str = Field(description="A maximum 5-word chat title for this turn.")
# Emit the model's JSON Schema as a plain dict -- the "schema" the helper consumes.
schema: Dict[str, Any] = AssistantResponse.model_json_schema()
print(json.dumps(schema, indent=2), end="\n\n")  # prove the schema shape

# Wrap it in the Responses API `text` container (the helper's default target).
text_api_parameter = build_schema_parameter(
    schema_name=schema["title"],  # Pydantic sets "title" to the class name
    schema=schema,
    schema_description="Conversation response and 5-word UI title",
)
print(json.dumps(text_api_parameter, indent=2))  # prove the 'text' shape
The `text` field from Pydantic + the function, ready to be used:
{
"format": {
"type": "json_schema",
"name": "AssistantResponse",
"description": "Conversation response and 5-word UI title",
"strict": true,
"schema": {
"additionalProperties": false,
"properties": {
"response_to_user": {
"description": "The response message intended for the user.",
"title": "Response To User",
"type": "string"
},
"five_word_title": {
"description": "A maximum 5-word chat title for this turn.",
"title": "Five Word Title",
"type": "string"
}
},
"required": [
"response_to_user",
"five_word_title"
],
"title": "AssistantResponse",
"type": "object"
}
}
}