Sessions and agent handovers approach

Hi All,

I’m learning to develop agents and am building my own personal agents using Agents SDK and GPT-OSS.

I’ve built an extendable solution that allows me to onboard new agents. Yesterday I was researching handovers and was able to build a system that allowed a handover to another agent. My challenge was how to keep the conversation going with the agent it was handed over to.

In my solution I do not keep the conversation loop open and instead use sessions to keep the conversation state, but I was expecting that this would also handle an earlier handover. What I saw happening instead is that the conversation always continued with the initial (router) agent. I then wrote a small piece of code that checks the earlier conversation for the last active agent and continues the conversation by passing that agent to the runner. This works, but I was wondering how the handover and session systems are supposed to work together. It seems like I’m working around a problem I created myself.

How do you guys solve this? Is the approach I took wrong or silly?

Thanks for your feedback!

Mark

from contextlib import AsyncExitStack
import asyncio
import streamlit as st
from agents import Runner, set_tracing_disabled
from openai.types.responses import (
    ResponseTextDeltaEvent,
    ResponseReasoningTextDeltaEvent,
)
from config import (
    APP_TITLE,
    MEMORY_BUFFER_SIZE,
)
from utils.sessions import (
    get_messages,
    get_session,
    clear_session,
    get_active_agent_name,
)
from aifred_agents.agent_builder import build_agent
from aifred_agents.aifred import agent as aifred_agent
from aifred_agents.search_agent import agent as search_agent, servers as search_servers
from aifred_agents.debate_agent import facilitator_agent, pro_agent, con_agent

# Turn off Agents SDK tracing for every run started by this app.
set_tracing_disabled(True)


# Streamlit app body: build the agent graph, resume with the session's last
# active agent, and stream the reply into the chat UI.
def main():
    """Render the chat UI and route each user message to the correct agent.

    On every turn the full agent graph is rebuilt on a fresh
    ``AsyncExitStack`` (the MCP servers live on that stack), the last
    active agent is read from the stored session so an earlier handoff is
    resumed rather than restarting at the router, and the reply is
    streamed token-by-token into Streamlit placeholders.
    """

    # Building the nested agent graph with all the agents and their handoffs and tools.
    async def build_agent_graph(stack: AsyncExitStack):
        """Instantiate all agents on *stack* and wire tools/handoffs.

        Returns a dict mapping each built agent's name to its instance.
        """
        agents = {}
        search_agent_instance = await build_agent(search_agent, search_servers, stack)
        agents[search_agent_instance.name] = search_agent_instance
        aifred_agent_instance = await build_agent(aifred_agent, [], stack)
        agents[aifred_agent_instance.name] = aifred_agent_instance
        facilitator_agent_instance = await build_agent(facilitator_agent, [], stack)
        agents[facilitator_agent_instance.name] = facilitator_agent_instance
        pro_agent_instance = await build_agent(pro_agent, [], stack)
        agents[pro_agent_instance.name] = pro_agent_instance
        con_agent_instance = await build_agent(con_agent, [], stack)
        agents[con_agent_instance.name] = con_agent_instance
        # The facilitator keeps control of the conversation and invokes the
        # pro/con debaters as tools instead of handing off to them.
        facilitator_agent_instance.tools = [
            pro_agent_instance.as_tool(
                tool_name="DEBATE_AGENT_PRO",
                tool_description="Argues in favor of the user's position.",
            ),
            con_agent_instance.as_tool(
                tool_name="DEBATE_AGENT_CON",
                tool_description="Argues against the user's position.",
            ),
        ]
        # The router (aifred) may hand the conversation off to these agents.
        aifred_agent_instance.handoffs = [
            search_agent_instance,
            facilitator_agent_instance,
        ]

        return agents

    # determine the active agent based on the sessions information
    async def get_active_agent(stack: AsyncExitStack):
        """Return the agent that last spoke in the stored session.

        Falls back to the router agent when the session holds no (known)
        active-agent name, so a fresh conversation starts at the router.
        """
        agents = await build_agent_graph(stack)
        active_agent_name = await get_active_agent_name()
        if active_agent_name and active_agent_name in agents:
            return agents[active_agent_name]
        return agents[aifred_agent.name]

    # the main function to stream the agent's conversation reply
    async def stream_agent_reply(
        user_input: str, on_text_delta, on_reasoning_delta, session
    ):
        """Run the active agent on *user_input* and stream its output.

        ``on_text_delta`` / ``on_reasoning_delta`` receive the accumulated
        text so far (not just the delta) for live UI updates. Returns the
        final stripped reply text.
        """
        async with AsyncExitStack() as stack:
            agent = await get_active_agent(stack)
            result = Runner.run_streamed(agent, user_input, session=session)

            full_text = ""
            full_reasoning = ""
            async for event in result.stream_events():
                if event.type == "raw_response_event":
                    if isinstance(event.data, ResponseTextDeltaEvent):
                        delta = event.data.delta or ""
                        if delta:
                            full_text += delta
                            on_text_delta(full_text)  # live update UI
                    elif isinstance(event.data, ResponseReasoningTextDeltaEvent):
                        delta = event.data.delta or ""
                        if delta:
                            full_reasoning += delta
                            on_reasoning_delta(full_reasoning)  # live update UI

            return full_text.strip()

    # Streamlit UI setup
    session = get_session()
    st.markdown(
        f"<h1 style='text-align: center;'>{APP_TITLE}</h1>", unsafe_allow_html=True
    )
    chat_container = st.container()

    # Replay the most recent stored messages into the chat history.
    with chat_container:
        messages = asyncio.run(get_messages())
        for msg in messages[-MEMORY_BUFFER_SIZE:]:
            with st.chat_message(msg["role"]):
                st.markdown(msg["content"])

    input_col, button_col = st.columns([0.90, 0.10])
    with input_col:
        user_input = st.chat_input(" Type your message...")

    with button_col:
        # Trash button wipes the stored session and restarts the script.
        if st.button("🗑️", use_container_width=True):
            asyncio.run(clear_session())
            st.rerun()

    if user_input:
        with chat_container:
            with st.chat_message("user"):
                st.markdown(user_input)

            expander = st.expander("Show Reasoning", expanded=False)

            with expander:
                reasoning_placeholder = st.empty()

            with st.chat_message("assistant"):
                chat_placeholder = st.empty()

                def on_chat_delta(text: str):
                    chat_placeholder.markdown(text)

                def on_reasoning_delta(reasoning: str):
                    reasoning_placeholder.markdown(reasoning)

                # Broad catch is deliberate: this is the top-level UI
                # boundary, and the error is surfaced in the chat instead
                # of crashing the Streamlit script.
                try:
                    final_text = asyncio.run(
                        stream_agent_reply(
                            user_input, on_chat_delta, on_reasoning_delta, session
                        )
                    )
                except Exception as e:
                    final_text = f"Error: {e}"

                chat_placeholder.markdown(final_text)


# Main entry point: execute the app body when this file is run as a script.
if __name__ == "__main__":
    main()