import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

from agents import Agent, CodeInterpreterTool, Runner
from agents.model_settings import ModelSettings, Reasoning
from coscience.abstract.response import ModelResponse
from pydantic import Field
from openai import AsyncOpenAI
from openai.types.responses import ResponseCodeInterpreterToolCall

client = AsyncOpenAI()
class AssistantResponse(ModelResponse):
    response: str = Field(..., description="Assistant response")
agent = Agent(
    name="Assistant",
    instructions="You are a helpful agent.",
    model="gpt-5-mini",
    model_settings=ModelSettings(
        reasoning=Reasoning(effort="low"),
    ),
    tools=[
        CodeInterpreterTool(
            tool_config={"type": "code_interpreter", "container": {"type": "auto"}}
        )
    ],
    output_type=AssistantResponse,
)
resp = await Runner.run(
    agent,
    (
        "Create a tiny CSV file named 'hello.csv' with header 'name,score' and two rows:\n"
        "Alice,10\nBob,9\n"
        "Also create a tiny XLSX file named 'hello.xlsx' with the same content.\n"
        "Then create a plot of the data from the CSV file using matplotlib (include download link for the file)."
    ),
)
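For reference, the structured result should be available on resp.final_output; a minimal check (a sketch, assuming the run above completed without errors) looks like this:

# resp.final_output is parsed into the AssistantResponse declared via output_type
answer: AssistantResponse = resp.final_output
print(answer.response)  # the assistant text, including the sandbox:/mnt/data/... links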
# Collect the container IDs from every code-interpreter tool call in the run
container_ids = [
    output.container_id
    for item in resp.raw_responses
    for output in item.output
    if isinstance(output, ResponseCodeInterpreterToolCall)
]

# List the files each container produced
output_files = []
for container_id in container_ids:
    files = await client.containers.files.list(container_id=container_id)
    for file in files.data:
        output_files.append({
            "file_id": file.id,
            "container_id": container_id,
            "path": file.path,
            "source": file.source,
        })
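To pull the generated files out of the container, the file-content endpoint can be used. This is a sketch, not part of the repro: it assumes client.containers.files.content.retrieve(...) in recent openai-python versions returns a binary wrapper exposing .read(); the exact helper and return type may differ between SDK versions.

# Hypothetical download step (not part of the repro): the REST endpoint is
# GET /containers/{container_id}/files/{file_id}/content; the Python helper and
# its return type may differ between openai-python versions.
for f in output_files:
    content = await client.containers.files.content.retrieve(
        f["file_id"], container_id=f["container_id"]
    )
    with open(f"downloaded_{f['file_id']}", "wb") as fh:
        fh.write(content.read())  # assumes a binary-content wrapper exposing .read()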
print(resp.new_items)
[ReasoningItem(agent=Agent(name='Assistant', handoff_description=None, tools=[CodeInterpreterTool(tool_config={'type': 'code_interpreter', 'container': {'type': 'auto'}})], mcp_servers=[], mcp_config={}, instructions='You are a helpful agent.', prompt=None, handoffs=[], model='gpt-5-mini', model_settings=ModelSettings(temperature=None, top_p=None, frequency_penalty=None, presence_penalty=None, tool_choice=None, parallel_tool_calls=None, truncation=None, max_tokens=None, reasoning=Reasoning(effort='low', generate_summary=None, summary=None), verbosity=None, metadata=None, store=None, include_usage=None, response_include=None, top_logprobs=None, extra_query=None, extra_body=None, extra_headers=None, extra_args=None), input_guardrails=[], output_guardrails=[], output_type=<class '__main__.AssistantResponse'>, hooks=None, tool_use_behavior='run_llm_again', reset_tool_choice=True), raw_item=ResponseReasoningItem(id='rs_68b0b45192f08190ab54d4e82e72180e0cd29e5dd2e39013', summary=[], type='reasoning', content=None, encrypted_content=None, status=None), type='reasoning_item'), ToolCallItem(agent=Agent(name='Assistant', handoff_description=None, tools=[CodeInterpreterTool(tool_config={'type': 'code_interpreter', 'container': {'type': 'auto'}})], mcp_servers=[], mcp_config={}, instructions='You are a helpful agent.', prompt=None, handoffs=[], model='gpt-5-mini', model_settings=ModelSettings(temperature=None, top_p=None, frequency_penalty=None, presence_penalty=None, tool_choice=None, parallel_tool_calls=None, truncation=None, max_tokens=None, reasoning=Reasoning(effort='low', generate_summary=None, summary=None), verbosity=None, metadata=None, store=None, include_usage=None, response_include=None, top_logprobs=None, extra_query=None, extra_body=None, extra_headers=None, extra_args=None), input_guardrails=[], output_guardrails=[], output_type=<class '__main__.AssistantResponse'>, hooks=None, tool_use_behavior='run_llm_again', reset_tool_choice=True), raw_item=ResponseCodeInterpreterToolCall(id='ci_68b0b452dabc819087480e30cb547d470cd29e5dd2e39013', code='# Create CSV and XLSX files and plot using matplotlib\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# Data\r\ndata = {"name": ["Alice", "Bob"], "score": [10, 9]}\r\ndf = pd.DataFrame(data)\r\n\r\n# File paths\r\ncsv_path = "/mnt/data/hello.csv"\r\nxlsx_path = "/mnt/data/hello.xlsx"\r\nplot_path = "/mnt/data/hello_plot.png"\r\n\r\n# Save CSV and XLSX\r\ndf.to_csv(csv_path, index=False)\r\ndf.to_excel(xlsx_path, index=False, engine=\'openpyxl\')\r\n\r\n# Create plot\r\nplt.figure(figsize=(4,3))\r\nplt.bar(df[\'name\'], df[\'score\'], color=[\'#4C72B0\', \'#55A868\'])\r\nplt.title(\'Scores by Name\')\r\nplt.ylabel(\'Score\')\r\nplt.ylim(0, max(df[\'score\']) + 1)\r\nfor i, v in enumerate(df[\'score\']):\r\n plt.text(i, v + 0.05, str(v), ha=\'center\')\r\nplt.tight_layout()\r\nplt.savefig(plot_path, dpi=150)\r\nplt.show()\r\n\r\n# List created files\r\nimport os\r\nos.listdir(\'/mnt/data\')[:10] # show files in the data directory\r\n\r\n# Output paths so assistant can provide download links\r\ncsv_path, xlsx_path, plot_path', container_id='cntr_68b0b45138208193ab27705c284819e3044aee3ff3d2b34b', outputs=None, status='completed', type='code_interpreter_call'), type='tool_call_item'), MessageOutputItem(agent=Agent(name='Assistant', handoff_description=None, tools=[CodeInterpreterTool(tool_config={'type': 'code_interpreter', 'container': {'type': 'auto'}})], mcp_servers=[], mcp_config={}, instructions='You are a helpful agent.', prompt=None, 
handoffs=[], model='gpt-5-mini', model_settings=ModelSettings(temperature=None, top_p=None, frequency_penalty=None, presence_penalty=None, tool_choice=None, parallel_tool_calls=None, truncation=None, max_tokens=None, reasoning=Reasoning(effort='low', generate_summary=None, summary=None), verbosity=None, metadata=None, store=None, include_usage=None, response_include=None, top_logprobs=None, extra_query=None, extra_body=None, extra_headers=None, extra_args=None), input_guardrails=[], output_guardrails=[], output_type=<class '__main__.AssistantResponse'>, hooks=None, tool_use_behavior='run_llm_again', reset_tool_choice=True), raw_item=ResponseOutputMessage(id='msg_68b0b45f1e50819092d43c08a39ab7700cd29e5dd2e39013', content=[ResponseOutputText(annotations=[], text='{"response":"Files created:\\n\\n- CSV: [Download hello.csv](sandbox:/mnt/data/hello.csv)\\n- XLSX: [Download hello.xlsx](sandbox:/mnt/data/hello.xlsx)\\n- Plot image: [Download hello_plot.png](sandbox:/mnt/data/hello_plot.png)\\n\\nThe CSV contains:\\nname,score\\nAlice,10\\nBob,9\\n\\nI also created and displayed a bar plot of the CSV data."}', type='output_text', logprobs=[])], role='assistant', status='completed', type='message'), type='message_output_item')]
print(output_files)
[{'file_id': 'cfile_68b0b46194cc8191bbdad7ef44a4c333', 'container_id': 'cntr_68b0b45138208193ab27705c284819e3044aee3ff3d2b34b', 'path': '/mnt/data/hello.csv [/mnt/data/hello_plot.png](https://file+.vscode-resource.vscode-cdn.net/mnt/data/hello_plot.png)', 'source': 'assistant'}, {'file_id': 'cfile_68b0b46194b881919aee409facc6c407', 'container_id': 'cntr_68b0b45138208193ab27705c284819e3044aee3ff3d2b34b', 'path': '/mnt/data/cfile_68b0b45ebfa88191a151996a6c052e0b.png [/mnt/data/hello.xlsx](https://file+.vscode-resource.vscode-cdn.net/mnt/data/hello.xlsx)', 'source': 'assistant'}, {'file_id': 'cfile_68b0b45ebfa88191a151996a6c052e0b', 'container_id': 'cntr_68b0b45138208193ab27705c284819e3044aee3ff3d2b34b', 'path': '/mnt/data/cfile_68b0b45ebfa88191a151996a6c052e0b.png', 'source': 'assistant'}]
The issue still exists: the annotations list on the final output message is empty, and the path field returned for the container files is still mangled (a Markdown download link is appended to the actual path).
# Re-listing the container files directly shows the same mangled paths in the raw API response:
files = await client.containers.files.list(container_id=container_id)
files
AsyncCursorPage[FileListResponse](data=[FileListResponse(id='cfile_68b0b46194cc8191bbdad7ef44a4c333', bytes=None, container_id='cntr_68b0b45138208193ab27705c284819e3044aee3ff3d2b34b', created_at=1756410977, object='container.file', path='/mnt/data/hello.csv [/mnt/data/hello_plot.png](https://file+.vscode-resource.vscode-cdn.net/mnt/data/hello_plot.png)', source='assistant'), FileListResponse(id='cfile_68b0b46194b881919aee409facc6c407', bytes=None, container_id='cntr_68b0b45138208193ab27705c284819e3044aee3ff3d2b34b', created_at=1756410977, object='container.file', path='/mnt/data/cfile_68b0b45ebfa88191a151996a6c052e0b.png [/mnt/data/hello.xlsx](https://file+.vscode-resource.vscode-cdn.net/mnt/data/hello.xlsx)', source='assistant'), FileListResponse(id='cfile_68b0b45ebfa88191a151996a6c052e0b', bytes=29754, container_id='cntr_68b0b45138208193ab27705c284819e3044aee3ff3d2b34b', created_at=1756410974, object='container.file', path='/mnt/data/cfile_68b0b45ebfa88191a151996a6c052e0b.png', source='assistant')], has_more=False, object='list', first_id='cfile_68b0b46194cc8191bbdad7ef44a4c333', last_id='cfile_68b0b45ebfa88191a151996a6c052e0b')
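Until the path issue is fixed, a workaround is to strip the appended Markdown link from the path field. This sketch assumes the extra part always starts with " [" as in the output above:

def clean_path(raw_path: str) -> str:
    # Keep only the real path; drop the Markdown link appended after " ["
    # (assumption based on the mangled paths shown above).
    return raw_path.split(" [", 1)[0]

cleaned = [{**f, "path": clean_path(f["path"])} for f in output_files]
print(cleaned)
# e.g. '/mnt/data/hello.csv [...](...)' -> '/mnt/data/hello.csv'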