I've found that if you want to filter any input model and any input reasoning effort down to one that is supported, consuming this table is pretty much the way to go.
In Python, where an int works as a boolean (0 is falsy, 1 is truthy):
# Reasoning effort support truth table (verified against the OpenAI API)
# Each list corresponds to: [none, minimal, low, medium, high, xhigh]
# Entries are 0/1 ints used as booleans: 1 means the model accepts that
# reasoning.effort value, 0 means it does not. Column order must stay in
# sync with REASONING_EFFORT_LEVELS. A model absent from this table should
# not be sent a reasoning.effort parameter at all.
REASONING_EFFORTS: dict[str, list[int]] = {
# model id [none,minimal, low,medium, high, xhigh]
# GPT-5.2 family
"gpt-5.2-2025-12-11": [ 1, 0, 1, 1, 1, 1],
"gpt-5.2-pro-2025-12-11": [ 0, 0, 0, 1, 1, 1],
"gpt-5.2-chat-latest": [ 0, 0, 0, 1, 0, 0],
"gpt-5.2-codex": [ 0, 0, 1, 1, 1, 1],
# GPT-5.1 family
"gpt-5.1-2025-11-13": [ 1, 0, 1, 1, 1, 0],
"gpt-5.1-chat-latest": [ 0, 0, 0, 1, 0, 0],
"gpt-5.1-codex": [ 0, 0, 1, 1, 1, 0],
"gpt-5.1-codex-mini": [ 0, 0, 1, 1, 1, 0],
"gpt-5.1-codex-max": [ 0, 0, 1, 1, 1, 1],
# GPT-5 family
"gpt-5-pro-2025-10-06": [ 0, 0, 0, 0, 1, 0],
"gpt-5-2025-08-07": [ 0, 1, 1, 1, 1, 0],
"gpt-5-codex": [ 0, 0, 1, 1, 1, 0],
"gpt-5-mini-2025-08-07": [ 0, 1, 1, 1, 1, 0],
"gpt-5-nano-2025-08-07": [ 0, 1, 1, 1, 1, 0],
# O-series
"o3-pro-2025-06-10": [ 0, 0, 1, 1, 1, 0],
"o4-mini-2025-04-16": [ 0, 0, 1, 1, 1, 0],
"o3-2025-04-16": [ 0, 0, 1, 1, 1, 0],
"o3-mini-2025-01-31": [ 0, 0, 1, 1, 1, 0],
"o1-pro-2025-03-19": [ 0, 0, 1, 1, 1, 0],
"o1-2024-12-17": [ 0, 0, 1, 1, 1, 0],
}
That's all currently; if a model is not in the table, treat it as a model NOT to pass reasoning.effort to.
Then, to make use of it:
REASONING_EFFORT_LEVELS: tuple[str, ...] = ("none", "minimal", "low", "medium", "high", "xhigh")
_EFFORT_RANK: dict[str, int] = {e: i for i, e in enumerate(REASONING_EFFORT_LEVELS)}
def get_allowed_reasoning_effort(model: str, requested: str = "low") -> str | None:
    """
    Map *requested* onto a reasoning.effort value that *model* accepts.

    Returns None when the model is absent from REASONING_EFFORTS (or its row is
    malformed), meaning the caller should omit the reasoning.effort parameter
    entirely.

    Selection rules:
      1) Unknown model -> None
      2) A supported request is returned unchanged
      3) "none" and "minimal" substitute for each other when only one of the
         pair is supported
      4) Otherwise fall back to the supported effort nearest in rank,
         preferring the higher effort on ties
    """
    mask = REASONING_EFFORTS.get(model)
    # Defensive: a row of the wrong width is treated the same as a missing model.
    if mask is None or len(mask) != len(REASONING_EFFORT_LEVELS):
        return None

    requested = requested.lower()
    if requested not in _EFFORT_RANK:
        requested = "low"  # unknown strings silently degrade to the default
    want = _EFFORT_RANK[requested]

    if mask[want]:
        return requested

    # "none" and "minimal" are interchangeable in spirit; use whichever one exists.
    twin = {"none": "minimal", "minimal": "none"}.get(requested)
    if twin is not None and mask[_EFFORT_RANK[twin]]:
        return twin

    candidates = [rank for rank, flag in enumerate(mask) if flag]
    if not candidates:
        return None
    # Nearest by rank distance; -rank breaks ties toward the higher effort.
    best = min(candidates, key=lambda rank: (abs(rank - want), -rank))
    return REASONING_EFFORT_LEVELS[best]
Validation: running every model against every effort level reproduces exactly the support table.
REASONING_EFFORT_LEVELS: tuple[str, ...] = ("none", "minimal", "low", "medium", "high", "xhigh")
def _supported_set(mask: list[int]) -> set[str]:
return {REASONING_EFFORT_LEVELS[i] for i, v in enumerate(mask) if v}
def _mask_from_set(s: set[str]) -> list[int]:
return [int(e in s) for e in REASONING_EFFORT_LEVELS]
def validate_reasoning_effort_truth_table(
    fn,
    table: dict[str, list[int]] = REASONING_EFFORTS,
) -> None:
    """
    Assert that *fn* reproduces the truth table exactly.

    For each model: the outputs over all requested efforts must never include
    None, must equal the supported set (equivalently, its mask), and fn must
    behave as the identity on every supported effort.
    """
    for model, truth_mask in table.items():
        supported = _supported_set(truth_mask)
        produced = {fn(model, effort) for effort in REASONING_EFFORT_LEVELS}
        assert None not in produced, (model, produced)
        assert produced == supported, (model, produced, supported)
        produced_mask = _mask_from_set(produced)
        assert produced_mask == truth_mask, (model, produced_mask, truth_mask)
        # Stronger guarantee: a supported request comes back unchanged.
        for idx, effort in enumerate(REASONING_EFFORT_LEVELS):
            if truth_mask[idx]:
                result = fn(model, effort)
                assert result == effort, (model, effort, result)
# Run: raises AssertionError on the first mismatch, prints "ok" otherwise.
validate_reasoning_effort_truth_table(get_allowed_reasoning_effort)
print("ok")