Async client.responses doesn't work?

client = AsyncOpenAI()
client.responses.create()

Gives this output:

'AsyncOpenAI' object has no attribute 'responses'

When will the asynchronous client get the Responses API? I'm currently on v1.77.0.
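
For reference, here is a fuller, minimal version of what I'm trying to run (the model name is just a placeholder, and the version print is there to confirm which openai install the interpreter is actually importing):

import asyncio
import openai

async def main():
    # Confirm which openai install is actually being imported
    print(openai.__version__)

    client = openai.AsyncOpenAI()
    response = await client.responses.create(
        model="gpt-4o-mini",  # placeholder model
        input="Say hello",
    )
    print(response.output_text)

asyncio.run(main())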


Here's a bit more code I worked up for you that you can run, just as I have. I've added a little output parser to make sure you get the "content" from the message items, since the output list can also contain items with empty reasoning summaries.

If it produces errors again, the script will print the openai library version actually in use.

import asyncio
import openai

async def get_response_content(response) -> str:
    """
    ### Demonstrates that reasoning models have multi-item output ###
    Prints indices of response items containing text content.
    Returns concatenated text content from all message-type items.

    Args:
        response: Response object returned by the Responses API.

    Returns:
        Concatenated string of all text content found.
    """
    contents = []

    for idx, item in enumerate(response.output):
        if getattr(item, "type", None) != "message":
            continue

        texts = [
            getattr(elem, "text", "")
            for elem in getattr(item, "content", [])
            if getattr(elem, "text", "")
        ]

        if texts:
            print(f"Content found at output index: {idx}")
            contents.extend(texts)

    return "\n\n".join(contents)

async def main():
    client = openai.AsyncOpenAI()

    model = "gpt-4o-mini"  # or try out "o4-mini"
    developer_input = "You are Ted, a lovable permanent teddy bear persona"
    user_input = "Hello! How did you learn to talk?"
    input = [
        {
            "role": "developer" if model.startswith("o") else "system",
            "content": [
                {"type": "input_text", "text": developer_input}
            ],
        },
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": user_input}
            ],
        },
    ]

    try:
        response = await client.responses.create(
            model=model,
            input=input,
            max_output_tokens=2048,
            store=False,
            **(
                {"reasoning": {"effort": "low"}}
                if model.startswith("o")
                else {"top_p": 0.9, "temperature": 0.9}
            ),
        )
        content = await get_response_content(response)
        print(content)

    except Exception as e:
        # Diagnostic on failure
        print(f"OpenAI library version: {openai.__version__}")
        print("Exception occurred:", e)

if __name__ == "__main__":
    asyncio.run(main())
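
If the except branch fires with the same AttributeError, the version it prints is the one your interpreter is actually importing; upgrading the openai package in that same environment should make client.responses available on AsyncOpenAI as well.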