I am currently facing difficulties implementing async generator using Python API. I am wondering if it is a limitation of OpenAI API.
import asyncio
async def async_generator(prompt):
res = await async_client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt}
],
temperature=0,
stream=True
)
# return res['choices'][0]['message']['content'].strip()
yield res
async def main():
async for i in async_generator(prompt):
print(i)
asyncio.run(main())
The value printed is <openai.AsyncStream object at 0x12996dc30> instead of the stream of data. Thanks.