To gracefully shut down streaming:
Wherever your backend listens to the front end, add a handler that reacts to some sort of stop signal, for example:
# shared state: maps each user_id to whether its stream should keep going
continue_streaming = {}

async def handle_stop_request(user_id):
    """Stop the stream for the given user_id."""
    if user_id in continue_streaming:
        continue_streaming[user_id] = False
        print(f"\n[handle_stop_request]: Stopping the stream for user {user_id}.")
    else:
        print(
            f"\n[handle_stop_request]: No active stream or already stopping for user {user_id}."
        )
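For example, with FastAPI WebSockets the listening side might look roughly like this; a minimal sketch, assuming a front-end message shaped like {"type": "stop"} (adapt it to whatever your front end actually sends):

import json

from fastapi import FastAPI, WebSocket

app = FastAPI()

@app.websocket("/ws/{user_id}")
async def websocket_endpoint(websocket: WebSocket, user_id: str):
    await websocket.accept()
    while True:
        # dispatch incoming front-end messages; a stop message just
        # flips the flag via the handler above
        message = json.loads(await websocket.receive_text())
        if message.get("type") == "stop":
            await handle_stop_request(user_id)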
Before you initiate the OpenAI streaming call, set the flag back to True, of course:
continue_streaming[user_id] = True  # allow streaming for this user
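In context, that line sits right before the call that opens the stream. A sketch of the wrapping generator, assuming the openai AsyncOpenAI client (the stream_completion name and the model are illustrative):

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def stream_completion(user_id, messages):
    continue_streaming[user_id] = True  # set True before the stream starts
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model
        messages=messages,
        stream=True,
    )
    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            yield delta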
After that, in the module where you handle the streamed chunks, you can simply check the flag and break out of the loop:
import json

async def consume_async_generator(user_id, websocket, generator):
    """Forward streamed chunks to the client until told to stop."""
    complete_response = ""
    try:
        async for chunk in generator:
            if isinstance(chunk, str):
                # bail out as soon as the stop handler flips the flag
                if not continue_streaming.get(user_id, True):
                    print(
                        f"\n[consume_async_generator]: stopping streaming for user {user_id}"
                    )
                    break
                complete_response += chunk
                message = json.dumps({"type": "response", "data": chunk})
                await websocket.send_text(message)
    finally:
        # reset the flag so the next stream for this user can run
        continue_streaming[user_id] = True
    return complete_response
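For this to work, the consumer has to run as a background task so the WebSocket receive loop stays free to catch the stop signal mid-stream; a sketch with a hypothetical handle_prompt helper, called from the receive loop above when a prompt message arrives:

import asyncio

async def handle_prompt(user_id, websocket, messages):
    generator = stream_completion(user_id, messages)
    # run the consumer concurrently; awaiting it here would block the
    # receive loop, and the stop message would never be read in time
    asyncio.create_task(consume_async_generator(user_id, websocket, generator))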