Object of type ChatCompletionMessageToolCall is not JSON serializable

I’m struggling to understand what I’m doing wrong here. The only difference I can see between my code and the example is where I am storing the messages: I keep a separate file, ‘conversation.json’, for persistence.

import os, json, pytz
from dotenv import load_dotenv
from openai import OpenAI
import flask_socketio
from datetime import datetime

load_dotenv()

OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
client = OpenAI(api_key=OPENAI_API_KEY)

model = "gpt-4o"

### File Handling ###
#####################

# Files
CONVERSATION_FILE = "config/conversation.json"
TOOLS_FILE = "config/tools.json"

### CONVERSATIONS
# Load Conversation
with open(CONVERSATION_FILE, 'r') as file:
    conversation = json.load(file)

# Saves Conversation
def save_conversation():
    with open(CONVERSATION_FILE, 'w') as file:
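        # json.dump is where the "not JSON serializable" TypeError surfaces
        # once non-dict objects have been appended to conversation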
        json.dump(conversation, file)

### TOOLS
# Current Datetime
def get_datetime(timezone='America/New_York'):
    timestamp = datetime.now(pytz.timezone(timezone))
    return timestamp.strftime('%A, %B %d, %Y %I:%M:%S %p %Z')

# Load Tools
with open(TOOLS_FILE, 'r') as file:
    tools = json.load(file)

### MEMORIES
# Load JSONs
def load_json(path):
    try:
        with open(path, "r") as file:
            return json.load(file)
    except FileNotFoundError:
        return []

# Formats the Messages into the required array format for the Chat Completions Model
def message_array(content, role):
    conversation.append({'role': role, 'content': content})
    save_conversation()
    return conversation

### OpenAI API Handling ###
def get_response(data):
    user_message = data['message']
    message_array(user_message, 'user')
    response = client.chat.completions.create(
        model=model,
        messages=conversation[-5:],
        tools=tools,
        tool_choice="auto"
    )
    response_message = response.choices[0].message
    response_content = response_message.content
    tool_calls = response_message.tool_calls
    if response_content:
        message_array(response_content, 'assistant')

    if tool_calls:
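        # this appends the raw ChatCompletionMessageToolCall objects, not dicts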
        conversation.append(tool_calls)

        for tool in tool_calls:
            toolbox(tool)
        print(conversation[-1:])
        second_response = client.chat.completions.create(
            model=model,
            messages=conversation[-5:],
        )
        second_response_content = second_response.choices[0].message.content
        if second_response_content:
            message_array(second_response_content, 'assistant')
        flask_socketio.emit('receive_chat', {'message': second_response_content})
        return None
    else:
        flask_socketio.emit('receive_chat', {'message': response_content})
        return None

def toolbox(tool):
    call_id = tool.id
    function = tool.function
    args = function.arguments  # JSON string; parse with json.loads() when needed
    name = function.name

    if name == "get_datetime":
        time = get_datetime()
        print(time)
        conversation.append(
            {
                'role': 'tool',
                'tool_call_id': call_id,
                'name': name,
                'content': time
            }
        )
        save_conversation()
    return None

You have overlooked that the response is composed of multiple layers of Pydantic model objects, which have methods you can use.
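For example, assuming the current openai v1 SDK (whose response objects are Pydantic v2 models), model_dump() recursively converts a message, tool calls included, into plain dicts, lists, and strings that json.dump accepts. A minimal sketch of that fix, reusing the names from your code:

response_message = response.choices[0].message

# convert the whole assistant message (content and any tool_calls)
# into plain Python types before persisting it
conversation.append(response_message.model_dump(exclude_none=True))
save_conversation()  # no longer raises "not JSON serializable"

The tool-role results you build by hand are already dicts, so only the model objects coming back from the API need this conversion.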

Here’s a demonstration that should yield inspiration. I use with_raw_response, which can also give you the response headers. Then I immediately take the response body as a string and use the json library to get a plain Python object of dictionaries, lists, and strings that you can work with in familiar ways.

First I set up an API call that uses either tools or functions, depending on what you uncomment:

from openai import OpenAI
import json
client = OpenAI(timeout=30)

toolspec=[]
toolspec.extend(
    [
        {
            "type": "function",
            "function": {
                "name": "world_capitals",
                "description": "This function is used to retrieve up-to-date country capitals",
                "parameters": {
                    "type": "object",
                    "properties": {"country_name": {"type": "string"}},
                    "required": ["country_name"],
                }
            }
        }
    ]
)

functionspec=[]
functionspec.extend(
    [
        {
            "name": "world_capitals",
            "description": "This function is used to retrieve up-to-date country capitals",
            "parameters": {
                "type": "object",
                "properties": {"country_name": {"type": "string"}},
                "required": ["country_name"],
            }
        }
    ]
)

params = {
    "model": "gpt-3.5-turbo",
    "top_p": 0.1,       # value 0.0-1.0, reduce to restrict to more certain tokens
    "max_tokens": 900,  # response length before truncation
    #"functions": functionspec,                     # functions: provide functions to AI
    #"function_call": {"name": "world_capitals"},   # functions: mandate calling a function (or auto)
    "tools": toolspec,                              # tools: provide tools to AI
    "tool_choice": "auto",                          # tools: mandate calling a tool (or auto)
    #"parallel_tool_calls": False,                  # tools: disable the parallel tool wrapper spec
    "logprobs": True, "top_logprobs": 3,            # logprobs (output disabled when tool or function)
    #"logit_bias": {"2": -1},                       # re-weight tokens, from -100 to 100
    "messages": [
        {
            "role": "system",
            "content":
            "You are APIChat, a large language AI. Knowledge cutoff: 2023-10, Current date: 2024-06-11",
        },
        {
            "role": "user",
            "content": "What is the capital of France? What is the capital of Germany?",
        },
    ],
}

# then do the API call, and get any types of AI response the API may return

print(f"asking: {params['messages'][1]['content']}")
c = client.chat.completions.with_raw_response.create(**params)
# Parse the 'LegacyAPIResponse' raw httpx response from a non-streaming call
api_return_dict = json.loads(c.text)
api_message_str = api_return_dict.get('choices')[0].get('message').get('content')
api_tools_list = api_return_dict.get('choices')[0].get('message').get('tool_calls')
api_functions_dict = api_return_dict.get('choices')[0].get('message').get('function_call')

if api_message_str:
    print(api_message_str)
if api_functions_dict:
    print(f"function:\n{api_functions_dict}")
if api_tools_list:
    for tool_index, tool_item in enumerate(api_tools_list):
        print(f"tool {tool_index}: \n{json.dumps(tool_item, indent=2)}")

(Warning: this particular prompt is specifically chosen to show a fault with the model, where it invokes multiple tools instead of responding to the user, regardless of whether the tools provided would be useful.)

Perhaps you can adapt this demonstration, which prints out what the AI emits, to your own use of tools and to returning tool responses back to the model:

asking: What is the capital of France? What is the capital of Germany?
tool 0: 
{
  "id": "call_h6yXwlLnCIZ4G0S9x7ZuY3pP",
  "type": "function",
  "function": {
    "name": "world_capitals",
    "arguments": "{\"country_name\": \"France\"}"
  }
}
tool 1: 
{
  "id": "call_Okoxbx99lCMVrN5w8XbFhmTM",
  "type": "function",
  "function": {
    "name": "world_capitals",
    "arguments": "{\"country_name\": \"Germany\"}"
  }
}
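To close the loop on your original error: everything appended to a conversation you intend to json.dump must already be plain dicts. A hedged sketch of the second leg, reusing api_tools_list and params from above and assuming the model actually requested tools (capital_lookup is a hypothetical stand-in for a real tool implementation):

capital_lookup = {"France": "Paris", "Germany": "Berlin"}  # hypothetical tool backend

messages = params["messages"]
# the assistant turn that requested the tools, appended as a plain dict
messages.append({"role": "assistant", "content": None, "tool_calls": api_tools_list})

# one tool message per call, matched up by tool_call_id
for tool_item in api_tools_list:
    country = json.loads(tool_item["function"]["arguments"])["country_name"]
    messages.append({
        "role": "tool",
        "tool_call_id": tool_item["id"],
        "content": capital_lookup.get(country, "unknown"),
    })

# messages now contains only dicts and strings, so both the follow-up
# call and json.dump(messages, file) succeed
followup = client.chat.completions.create(model=params["model"], messages=messages)
print(followup.choices[0].message.content)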