Temperature in GPT-5 models

For reference, here is a working example: a streaming chat.completions request to gpt-5-chat-latest using the sampling parameters (temperature, top_p, penalties, logit_bias) that this particular model variant still accepts.

from openai import OpenAI
client = OpenAI()  # picks up credentials (OPENAI_API_KEY) from the environment by default

# Collect the request parameters in one dict so the (deliberately chosen)
# sampling settings are easy to audit, then splat them into the SDK call.
request_params = {
    "model": "gpt-5-chat-latest",
    "store": False,
    "frequency_penalty": 0.05,
    "presence_penalty": 0.1,
    "max_tokens": 68,  # you owe me one
    "temperature": 0,  # 0 or 1 for logit_bias to work
    "top_p": 1,        # only 1 for logit_bias to work
    "logit_bias": {168394: -100, 4108: -99},  # effect of "```" and "json"
    "stream": True,
    "stream_options": {
        "include_obfuscation": False,
        "include_usage": True,
    },
    "stop": ["\"\n}\n", "\"}\n"],  # terminate a continuing JSON
    "response_format": {"type": "json_object"},
    # Deliberately omitted — not accepted by this model/endpoint combination:
    # service_tier="flex", reasoning_effort, logprobs, tools,
    # modalities=["text", "audio"] (surely not).
    "messages": [
        {
            "role": "system",
            "content": "You are ChatAPI, a developer-friendly AI model.",
        },
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Send a JSON, with key 'chat_introduction'",
                }
            ],
        },
    ],
}
response = client.chat.completions.create(**request_params)
# Consume the stream: accumulate/echo content deltas, then print the final
# usage chunk (present because stream_options.include_usage=True).
assistant_content = ""
for chunk in response:
    # Hoist the delta text — the usage-only final chunk has empty `choices`.
    delta_text = chunk.choices[0].delta.content if chunk.choices else None
    if delta_text and not chunk.usage:
        assistant_content += delta_text
        # flush=True so each token appears immediately instead of sitting
        # in the stdout buffer until the stream ends.
        print(delta_text, end="", flush=True)
    elif chunk.usage:
        # Final chunk carries token accounting only.
        print("\n" + str(chunk.usage.model_dump()))

Note: gpt-5-chat-latest is the non-reasoning ChatGPT-5 variant, and it has a strong tendency to wrap output in markdown (hence the logit_bias against "```" and "json").