More detailed information about models

Regarding:

List models

GET https://api.openai.com/dashboard/models

Could you please extend your API exploration to this endpoint?

Add a model ID to the path to get just one model instead of 3000+ lines of output:

That way, extending applications may be possible if you can anticipate future feature flags appearing in the “features” array.

The question is how to make the data structure future-proof against new strings that act like boolean gates. You might not have been able to anticipate a new model adding "reasoning_effort_none", for example.

Thus of course, to use new feature strings appearing in the list of features you receive, the code would have to be changed anyway.

Read up here:


In terms of “friendly names”, you can employ only “aliases” without dates, which are not indicated by the models endpoint — neither when they are returned nor by where they point. You’d need some object that relates each “alias” (short name) back to the full “snapshot” (the dated version, which is not actually a snapshot in time) that is being run, so you might present only the alias in a UI.

Here’s hours of labor compiling and verifying where OpenAI documentation is lacking, which you get for free today in consumable format (while the support for functions, strict functions, custom functions, json_object, json_schema, the patches tool, token costs for standard/priority/batch, cached pricing — per-model, if 24 hr — audio and image costs, deprecation warnings and auto-shutoffs, etc., I will hold out, as actual work that benefits someone usually means +0 likes in this forum):

Python code, but it should avoid non-JSON constructs apart from the comments.

# Per-model capability/limit table, keyed by full model ID (the dated "snapshot"
# name where one exists).  Field meanings below are inferred from the entries in
# this table and the surrounding discussion — confirm against the dashboard
# models endpoint before relying on them:
#   cc          - 1 if usable on the Chat Completions endpoint, else 0 (presumably)
#   r           - 1 if usable on the Responses endpoint, else 0 (presumably)
#   c           - 1 for legacy Completions-endpoint models
#   msg, plus   - small ints whose semantics are not established here — TODO confirm
#   max_out     - maximum output, in thousands of tokens (97.65625 k == exactly 100_000)
#   vision      - image-input scheme: "tile", "patch", None (no image input),
#                 or False (model rejects images outright — see comment below)
#   tile_tokens - tokens per image tile when vision == "tile"
#   vision_mult - multiplier applied to image token cost (presumably pricing)
#   verbosity   - 1 if the model accepts the verbosity parameter
#   sampling    - 1 if standard sampling params (temperature/top_p) are honored
#   rag_req     - 1 if an internal RAG tool / web_search is required (deep research)
#   alias       - undated "friendly" alias that currently points at this snapshot
#   to          - presumably a redirect: requests for this ID are served by the
#                 named model — TODO confirm
MODEL_CAPABILITIES: dict[str, dict[str, object]] = {
    # GPT-5.2 Family (special-case sampling behavior at low-effort settings on non-chat)
    "gpt-5.2-2025-12-11":      {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision": "patch", "vision_mult": 1.2, "verbosity": 1, "alias": "gpt-5.2"},
    "gpt-5.2-pro-2025-12-11":  {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision": "patch", "vision_mult": 1.2, "verbosity": 1, "alias": "gpt-5.2-pro"},
    "gpt-5.2-codex":           {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision": "patch", "vision_mult": 1.2, "verbosity": 1},
    "gpt-5.2-chat-latest":     {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 16, "vision": "patch", "vision_mult": 1.2, "verbosity": 1},  # constrained effort


    # GPT-5.1 Family
    "gpt-5.1-2025-11-13":      {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision":  "tile", "tile_tokens": 70, "verbosity": 1, "alias": "gpt-5.1"},
    "gpt-5.1-codex-max":       {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision":  "tile", "vision_mult": 1.0,"tile_tokens": 70},
    "gpt-5.1-codex":           {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision":  "tile", "tile_tokens": 70},
    "gpt-5.1-codex-mini":      {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision": "patch", "vision_mult": 1.2},
    "gpt-5.1-chat-latest":     {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 16, "vision":  "tile", "tile_tokens": 70},

    # GPT-5 Family (verbosity supported on reasoning variants)
    "gpt-5-2025-08-07":        {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision":  "tile", "tile_tokens": 70, "verbosity": 1, "alias": "gpt-5"},
    "gpt-5-mini-2025-08-07":   {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision": "patch", "vision_mult": 1.2, "verbosity": 1, "alias": "gpt-5-mini"},
    "gpt-5-nano-2025-08-07":   {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision": "patch", "vision_mult": 1.5, "verbosity": 1, "alias": "gpt-5-nano"},
    "gpt-5-pro-2025-10-06":    {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision":  "tile", "tile_tokens": 70, "verbosity": 1, "alias": "gpt-5-pro"},
    "gpt-5-codex":             {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision":  "tile", "tile_tokens": 70},
    "gpt-5-chat-latest":       {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 16, "vision":  "tile", "tile_tokens": 70, "sampling": 1},

    # O-Series (no verbosity)
    "o3-pro-2025-06-10":       {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 97.65625, "vision":  "tile", "tile_tokens": 75, "alias": "o3-pro"},
    "o3-2025-04-16":           {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 97.65625, "vision":  "tile", "tile_tokens": 75, "alias": "o3"},
    "o4-mini-2025-04-16":      {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 97.65625, "vision": "patch", "vision_mult": 1.72, "alias": "o4-mini"},
    "o1-pro-2025-03-19":       {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 97.65625, "vision":  "tile", "tile_tokens": 75, "alias": "o1-pro"},
    "o3-mini-2025-01-31":      {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 97.65625, "vision":    None, "alias": "o3-mini"},
    "o1-2024-12-17":           {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 97.65625, "vision":  "tile", "tile_tokens": 75, "alias": "o1"},
    "codex-mini-latest":       {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 97.65625, "vision": "patch", "vision_mult": 1.72},
    # deep-research in model requires internal RAG tool or web_search with context:medium, no user location
    "o3-deep-research-2025-06-26": {"cc": 0, "r": 1, "max_out": 97.65625, "vision": "tile", "tile_tokens": 75, "rag_req": 1, "alias": "o3-deep-research"},
    "o4-mini-deep-research-2025-06-26": {"cc": 0, "r": 1, "max_out": 97.65625, "vision": "patch", "vision_mult": 1.72, "rag_req": 1, "alias": "o4-mini-deep-research"},

    # GPT-4x Family with vision (standard sampling)
    "gpt-4.1-2025-04-14":      {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 32, "vision":  "tile",  "tile_tokens": 85, "sampling": 1, "alias": "gpt-4.1"},
    "gpt-4.1-mini-2025-04-14": {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 32, "vision": "patch", "vision_mult": 1.62, "sampling": 1, "alias": "gpt-4.1-mini"},
    "gpt-4.1-nano-2025-04-14": {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 32, "vision": "patch", "vision_mult": 2.46, "sampling": 1, "alias": "gpt-4.1-nano"},
    "gpt-4o-2024-11-20":       {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 16, "vision":  "tile",  "tile_tokens": 85, "sampling": 1},
    "gpt-4o-2024-08-06":       {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 16, "vision":  "tile",  "tile_tokens": 85, "sampling": 1, "alias": "gpt-4o"},
    # NOTE(review): both tile_tokens and vision_mult set here — presumably mini pricing
    # scales the tile cost; confirm against pricing docs
    "gpt-4o-mini-2024-07-18":  {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 16, "vision":  "tile",  "tile_tokens": 85, "vision_mult": 33.333, "sampling": 1, "alias": "gpt-4o-mini"},
    "gpt-4o-2024-05-13":       {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 4, "vision":  "tile",  "tile_tokens": 85, "sampling": 1},
    "chatgpt-4o-latest":       {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 16, "vision":  "tile",  "tile_tokens": 85, "sampling": 1},
    "gpt-4-turbo-2024-04-09":  {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 4, "vision":  "tile",  "tile_tokens": 85, "sampling": 1, "alias": "gpt-4-turbo"},

    # GPT-4 Turbo / Preview, no vision
    "gpt-4-1106-preview":   {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1},
    "gpt-4-0125-preview":   {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1, "alias": "gpt-4-turbo-preview"},

    # GPT-4 (legacy, no vision)
    "gpt-4-0314":      {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 8, "sampling": 1, "vision": None},
    "gpt-4-0613":      {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 8, "sampling": 1, "vision": None, "alias": "gpt-4"},

    # GPT-3.5 (legacy, no vision)
    "gpt-3.5-turbo-16k":      {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1, "vision": None, "to": "gpt-3.5-turbo-0125"},
    "gpt-3.5-turbo-1106":     {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1, "vision": None},
    "gpt-3.5-turbo-0125":     {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1, "vision": None, "alias": "gpt-3.5-turbo"},

    # Audio models — newer snapshots ("gpt-audio-2025-08-28", the mini "-10-06" and
    # "-12-15" variants) are presumably also available on Chat Completions — TODO confirm
    "gpt-4o-audio-preview-2024-12-17":      {"cc": 1, "r": 0, "msg": 4, "plus": 3, "max_out": 16, "vision": "tile", "tile_tokens": 85, "sampling": 1},
    "gpt-4o-audio-preview-2025-06-03":      {"cc": 1, "r": 0, "msg": 4, "plus": 3, "max_out": 16, "vision": "tile", "tile_tokens": 85, "sampling": 1, "alias": "gpt-4o-audio-preview"},
    "gpt-4o-mini-audio-preview":            {"cc": 1, "r": 0, "msg": 4, "plus": 3, "max_out": 16, "vision": "tile", "tile_tokens": 85, "vision_mult": 33.333, "sampling": 1,},
    "gpt-4o-mini-audio-preview-2024-12-17": {"cc": 1, "r": 0, "msg": 4, "plus": 3, "max_out": 16, "vision": "tile", "tile_tokens": 85, "vision_mult": 33.333, "sampling": 1,},

    # special: "computer-use-preview" takes only screenshot tool return; any CC "search" model takes no images
    # (hence vision: False — images are rejected — rather than None)
    "computer-use-preview-2025-03-11":    {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 16, "vision": False, "tile_tokens": 65, "alias": "computer-use-preview"}, # requires truncation:auto

    "gpt-5-search-api-2025-10-14":        {"cc": 1, "r": 0, "msg": 4, "plus": 2, "max_out": 16, "vision": False, "alias": "gpt-5-search-api"},
    "gpt-4o-search-preview-2025-03-11":   {"cc": 1, "r": 0, "msg": 4, "plus": 3, "max_out": 16, "vision": False, "sampling": 1, "alias": "gpt-4o-search-preview"},
    "gpt-4o-mini-search-preview-2025-03-11": {"cc": 1, "r": 0, "msg": 4, "plus": 3, "max_out": 16, "vision": False, "sampling": 1, "alias": "gpt-4o-mini-search-preview"},

    # Shut-off chat models (cc: 0, r: 0); kept here for persisting fine-tuned models and history
    "gpt-4.5-preview":                 {"cc": 0, "r": 0, "msg": 4, "plus": 2, "max_out": 4, "vision": "tile", "tile_tokens": 75},  # announced 2025-02-27

    "gpt-4o-audio-preview-2024-10-01": {"cc": 0, "r": 0, "msg": 4, "plus": 3, "max_out": 16, "vision": "tile", "tile_tokens": 85, "sampling": 1},
    "o1-preview-2024-09-12":           {"cc": 0, "r": 0, "msg": 4, "plus": 2, "max_out": 97.65625, "vision": None, "alias": "o1-preview"},
    "o1-mini-2024-09-12":              {"cc": 0, "r": 0, "msg": 4, "plus": 2, "max_out": 97.65625, "vision": None, "alias": "o1-mini"},
    "gpt-4-1106-vision-preview":       {"cc": 0, "r": 0, "msg": 4, "plus": 3, "max_out": 4, "vision": "tile", "tile_tokens": 85, "sampling": 1, "alias": "gpt-4-vision-preview"},
    "gpt-4-32k-0613":                  {"cc": 0, "r": 0, "msg": 4, "plus": 3, "max_out": 8, "sampling": 1, "vision": None, "alias": "gpt-4-32k"},
    "gpt-3.5-turbo-16k-0613":          {"cc": 0, "r": 0, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1, "vision": None},
    "gpt-3.5-turbo-0613":              {"cc": 0, "r": 0, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1, "vision": None},
    "gpt-4-32k-0314":                  {"cc": 0, "r": 0, "msg": 4, "plus": 3, "max_out": 8, "sampling": 1, "vision": None},
    "gpt-3.5-turbo-0301":              {"cc": 0, "r": 0, "msg": 4, "plus": 3, "max_out": 4, "sampling": 1, "vision": None},

    # Legacy Completions-endpoint models ("c" flag), deprecated
    "gpt-3.5-turbo-instruct": {"c": 1, "sampling": 1, "max_out": 4},
    "davinci-002":            {"c": 1, "sampling": 1, "max_out": 4},
    "babbage-002":            {"c": 1, "sampling": 1, "max_out": 4},
    "text-davinci-003": {"max_out": 4},

    # Unannounced / "sneaky" model IDs observed but not documented
    "gpt-5.1-mini":                  {"max_out": 125},  # was in spec, presumably 2025-11-13, never delivered

    "gpt-5-pro-alpha-2025-09-15":    {"cc": 0, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision":  "tile", "tile_tokens": 70, "verbosity": 1, "alias": "gpt-5-pro"},
    "gpt-4o-2024-08-06-alpha":       {"cc": 1, "r": 1, "msg": 4, "plus": 3, "max_out": 16, "vision":  "tile",  "tile_tokens": 85, "sampling": 1, "alias": "gpt-4o"},
    "gpt-5-mini-alpha-2025-08-07":   {"cc": 1, "r": 1, "msg": 4, "plus": 2, "max_out": 125, "vision": "patch", "vision_mult": 1.2, "verbosity": 1, "alias": "gpt-5-mini"},
    "gpt-4-turbo-preview-completions": {"c": 1, "sampling": 1, "max_out": 4},
}
# Note: the context window and max_out are useful to gate content size and to set the
# max_output_tokens parameter — complicated by GPT-5's 272k max input.

Or use a different “true” truth value for the reasoning.effort capability.