OpenAI Agents SDK + LangSmith integration - traceable wrapper

I have a simple tool that lowercases a string using an LLM, plus a triage Agent responsible for picking the right tool for the task (in this case there is only one tool). I run the code with the OpenAIAgentsTracingProcessor configured.
This approach works, but the issue is that the LangSmith UI shows separate traces for the Agent run and the tool's LLM calls.
How can I get the agent and the tool call under the same trace?

```
import asyncio
import json
from typing import Any

from agents import Agent, Runner, RunContextWrapper, function_tool, set_trace_processors
from langsmith.wrappers import OpenAIAgentsTracingProcessor, wrap_openai
from openai import AsyncOpenAI
from pydantic import BaseModel, Field


class TextInput(BaseModel):
    text: str = Field(..., min_length=1, max_length=10_000, description="Text to process")


@function_tool(name_override="make_lowercase_tool", description_override="Convert the given text to lowercase.")
async def make_lowercase_tool(
    ctx: RunContextWrapper[Any],
    args: TextInput
) -> str:
    """Tool that uses an LLM to convert text to lowercase."""
    client = wrap_openai(AsyncOpenAI())

    response = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "Convert the given text to lowercase. Return only the result."},
            {"role": "user", "content": f"Convert to lowercase: {args.text}"}
        ],
        max_tokens=100,
        temperature=0
    )

    result = response.choices[0].message.content.strip()

    usage_info = {
        "model": response.model,
        "prompt_tokens": response.usage.prompt_tokens,
        "completion_tokens": response.usage.completion_tokens,  # type: ignore
        "total_tokens": response.usage.total_tokens,
        "request_id": response.id,
        "created": response.created,
        "finish_reason": response.choices[0].finish_reason
    }

    print(f"make_lowercase_tool tokens | {usage_info.get('total_tokens')}")

    return json.dumps({
        "result": result,
        "operation": "lowercase",
        "original_text": args.text,
        "usage_metadata": {
            "input_tokens": usage_info.get("prompt_tokens"),
            "output_tokens": usage_info.get("completion_tokens"),
            "total_tokens": usage_info.get("total_tokens")
        }
    })


async def run_smart_text_agent(user_query: str, text: str) -> str:
    """Run the agent that uses the tool to process the input."""

    instructions = (
        "You are a text processing agent. You have one tool:\n"
        "- make_lowercase_tool: converts text to lowercase\n\n"
        "IMPORTANT: When given a text and a request, immediately use the appropriate tool to process the text. "
        "Do NOT explain your capabilities - just do the work and return the results."
    )
    agent = Agent(
        name="Smart Text Processing Agent",
        instructions=instructions,
        tools=[make_lowercase_tool],
        model="gpt-4o-mini-2024-07-18"
    )
    prompt = f"Process this text: '{text}'. Request: {user_query}. Use the appropriate tool immediately."
    result = await Runner.run(agent, prompt)

    return result.final_output


if __name__ == "__main__":
    set_trace_processors([OpenAIAgentsTracingProcessor()])
    asyncio.run(main())
```

Would be grateful for any assistance.

Hi Daniel - do the following:

When you define your OpenAI client, add this:

```
import os

from agents import set_default_openai_client
from langsmith.wrappers import wrap_openai
from openai import AsyncOpenAI

client = wrap_openai(AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")))
set_default_openai_client(client)
```

Then, where you run the agent created with the wrapped client, do:


```
import langsmith as ls
from langsmith import traceable

@traceable(run_type="tool")
async def do_stuff(query: str):
    parent = ls.get_current_run_tree()

    with ls.tracing_context(parent=parent):
        result = await Runner.run(your_agent, query)

    return result
```

Finally, remove set_trace_processors([OpenAIAgentsTracingProcessor()])
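
For reference, the entry point then shrinks to something like this (a minimal sketch; do_stuff is the wrapper above and the query string is just illustrative):

```
import asyncio

# Note: no set_trace_processors([OpenAIAgentsTracingProcessor()]) here.
# The wrapped default client plus the @traceable wrapper is what puts the
# agent run and its LLM/tool calls under a single LangSmith trace.
if __name__ == "__main__":
    asyncio.run(do_stuff("Convert 'HELLO World' to lowercase."))
```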

Hope this helps!

Paul

I set set_default_openai_client(client) inside the tool calls, since they call the OpenAI client directly.

But even with the @traceable wrapper around the agent function, LangSmith doesn't trace the agent's token count - only the tool calls'.

You can see that everything is now under one trace, but the agent call itself isn't traced properly: the agent's token count is 620.

```
import json
from typing import Any

from pydantic import BaseModel, Field
from agents import Agent, Runner, RunContextWrapper, FunctionTool, set_default_openai_client
from openai import AsyncOpenAI

from dotenv import load_dotenv
from langsmith import traceable, get_current_run_tree, tracing_context
from langsmith.wrappers import wrap_openai

load_dotenv()

class TextInput(BaseModel):
    text: str = Field(..., min_length=1, max_length=10_000, description="Text to process")

@traceable(run_type="tool")
async def _make_lowercase_on_invoke(
    ctx: RunContextWrapper[Any],
    args_json: str
) -> str:
    """Tool that uses LLM to convert text to lowercase."""

    args = TextInput.model_validate_json(args_json)

    client = wrap_openai(AsyncOpenAI())
    set_default_openai_client(client)

    response = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "Convert the given text to lowercase. Return only the result."},
            {"role": "user", "content": f"Convert to lowercase: {args.text}"}
        ],
        max_tokens=100,
        temperature=0
    )

    result = (response.choices[0].message.content or "").strip()

    usage_info = {
        "model": response.model,
        "prompt_tokens": getattr(response.usage, "prompt_tokens", None),
        "completion_tokens": getattr(response.usage, "completion_tokens", None),
        "total_tokens": getattr(response.usage, "total_tokens", None),
        "request_id": response.id,
        "created": response.created,
        "finish_reason": response.choices[0].finish_reason,
    }

    print(f"make_lowercase_tool tokens | {usage_info.get("total_tokens")}")

    return json.dumps({
        "result": result,
        "operation": "lowercase",
        "original_text": args.text,
        "usage_metadata": {
            "input_tokens": usage_info.get("prompt_tokens"),
            "output_tokens": usage_info.get("completion_tokens"),
            "total_tokens": usage_info.get("total_tokens")
    }})
MAKE_LOWERCASE_SCHEMA = {
    "type": "object",
    "properties": {
        "text": {
            "type": "string",
            "minLength": 1,
            "maxLength": 10_000,
            "description": "Text to process",
        }
    },
    "required": ["text"],
    "additionalProperties": False,}

make_lowercase_tool = FunctionTool(
    name="make_lowercase_tool",
    description="Convert the given text to lowercase.",
    params_json_schema=MAKE_LOWERCASE_SCHEMA,
    on_invoke_tool=_make_lowercase_on_invoke,
)

@traceable(run_type="tool")
async def _count_words_on_invoke(
    ctx: RunContextWrapper[Any],
    args_json: str
) -> str:
    """Tool that uses LLM to count words in text."""

    args = TextInput.model_validate_json(args_json)

    client = wrap_openai(AsyncOpenAI())
    set_default_openai_client(client)

    response = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "Count the number of words in the given text. Return only the number as a string."},
            {"role": "user", "content": f"Count words in: {args.text}"}
        ],
        max_tokens=50,
        temperature=0
    )

    result = (response.choices[0].message.content or "").strip()

    usage_info = {
        "model": response.model,
        "prompt_tokens": getattr(response.usage, "prompt_tokens", None),
        "completion_tokens": getattr(response.usage, "completion_tokens", None),
        "total_tokens": getattr(response.usage, "total_tokens", None),
        "request_id": response.id,
        "created": response.created,
        "finish_reason": response.choices[0].finish_reason,
    }

    print(f"count_words_tool tokens | {usage_info.get("total_tokens")}")

    return json.dumps({
        "result": result,
        "operation": "count_words",
        "original_text": args.text,
        "usage_metadata": {
            "input_tokens": usage_info.get("prompt_tokens"),
            "output_tokens": usage_info.get("completion_tokens"),
            "total_tokens": usage_info.get("total_tokens")
    }})

COUNT_WORDS_SCHEMA = {
    "type": "object",
    "properties": {
        "text": {
            "type": "string",
            "minLength": 1,
            "maxLength": 10_000,
            "description": "Text to process",
        }
    },
    "required": ["text"],
    "additionalProperties": False,}

count_words_tool = FunctionTool(
    name="count_words_tool",
    description="Count the number of words in the given text.",
    params_json_schema=COUNT_WORDS_SCHEMA,
    on_invoke_tool=_count_words_on_invoke,
)

@traceable(run_type="tool")
async def run_smart_text_agent(user_query: str, text: str) -> str:
    """Run the agent that uses the tool to process the input."""

    instructions = (
        "You are a text processing agent. You have 2 tools:\n"
        "- make_lowercase_tool: converts text to lowercase\n"
        "- count_words_tool: counts words in text\n\n"
        "IMPORTANT: When given a text and a request, immediately use the appropriate tool to process the text. "
        "Do NOT explain your capabilities - just do the work and return the results."
    )
    agent = Agent(
        name="Smart Text Processing Agent",
        instructions=instructions,
        tools=[make_lowercase_tool, count_words_tool],
        model="gpt-4o-mini-2024-07-18"
    )
    prompt = f"Process this text: '{text}'. Request: {user_query}. Use the appropriate tool immediately."

    parent = get_current_run_tree()
    with tracing_context(parent=parent):
        result = await Runner.run(agent, prompt)

    # (optional) create agent insight summary
    summary = []
    for i, mr in enumerate(result.raw_responses):
        model = result._last_agent.model
        if i < len(result.new_items) and hasattr(result.new_items[i], "agent"):
            model = result.new_items[i].agent.model or model
        u = mr.usage
        summary.append({
            "step": i,
            "model": model,
            "requests": u.requests,
            "input_tokens": u.input_tokens,
            "output_tokens": u.output_tokens,
            "total_tokens": u.total_tokens,
            "response_id": getattr(mr, "response_id", None),
        })
    # print(summary)
    print("Agent tokens |: ",result.context_wrapper.usage.total_tokens)
    return result.final_output
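
One possible workaround, not from the thread: LangSmith attributes token counts to runs of run_type="llm" whose outputs include a usage_metadata dict, so you can run the agent inside a small traceable helper that returns the aggregate usage from result.context_wrapper.usage. A sketch (the helper name is made up):

```
from agents import Agent, Runner
from langsmith import traceable


@traceable(run_type="llm", name="smart_text_agent_run")
async def run_agent_reporting_usage(agent: Agent, prompt: str) -> dict:
    """Run the agent and expose its aggregate token usage in the
    usage_metadata shape that LangSmith reads for 'llm'-type runs."""
    result = await Runner.run(agent, prompt)
    usage = result.context_wrapper.usage  # summed over all model calls in the run
    return {
        "output": result.final_output,
        "usage_metadata": {
            "input_tokens": usage.input_tokens,
            "output_tokens": usage.output_tokens,
            "total_tokens": usage.total_tokens,  # e.g. the 620 seen above
        },
    }
```

Calling this from run_smart_text_agent and returning the "output" field should surface the agent-level 620 tokens on the trace.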

Additionally, I have a tool call in another script that runs independently, but I want to put it under the same trace.
Any hint on how to do that?
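
Not an answer from the thread, but one hedged suggestion: LangSmith supports distributed tracing, where the first process serializes its current run tree to headers and the second process rehydrates them via tracing_context(parent=headers). A sketch, assuming you can pass a small dict between the two scripts (the function names and the transport are placeholders):

```
import langsmith as ls
from langsmith import traceable


# --- Script A: inside a traced run, export the trace context ---
@traceable(run_type="chain")
def parent_pipeline() -> dict:
    run_tree = ls.get_current_run_tree()
    headers = run_tree.to_headers()  # e.g. {"langsmith-trace": ..., "baggage": ...}
    # hand `headers` to the other script: HTTP headers, a queue message,
    # a subprocess environment variable, a temp file, etc.
    return headers


# --- Script B: attach the independent tool call to that trace ---
@traceable(run_type="tool")
def independent_tool_call(text: str) -> str:
    return text.lower()


def handle(headers: dict, text: str) -> str:
    with ls.tracing_context(parent=headers):
        return independent_tool_call(text)
```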