Issue with function dependencies in parallel function calling

When I provide ‘tools’ and set ‘tool_choice = auto’, I found that ‘gpt-3.5-turbo-1106’ failed to realize that some functions need the input from other functions and simply responded with all tool calls in one single response. This issue does not exist in ‘gpt-3.5-turbo-0613’.

In the sample code below, I provide the instructions, 2 tools (check_stock & send instant message) and the prompt “Send the stock levels of iPad and keyboard in an instant message to Peter and remind him to attend tomorrow’s meeting.”. The LLM responded by correctly calling check_stock twice with different stock items but, at the same time, just sent “Please attend tomorrow’s meeting.” to Peter without the stock levels of iPad and keyboard!

So far, I have found two fixes that work.

  1. Need to include dependencies in the instructions: “- Do not execute the send instant message function in parallel with other functions.”

  2. Add “about this” to the end of the original prompt.

With one of the above fixes, the LLM realized that it needs one pass to collect the stock levels and another pass to compose the instant message. I hope the LLM can become smarter at analyzing dependencies when using the given tools.

import os
from openai import OpenAI
import json
import random

# OpenAI client for all Chat Completions calls below.
# The API key is read from the "OpenAIKey" environment variable;
# os.environ.get returns None if it is unset, in which case the SDK
# falls back to its own key-resolution / raises on first use.
client = OpenAI(
    api_key=os.environ.get("OpenAIKey"),
)

# Example dummy function to randomly generate a stock quantity on hand
# In production, this could be your backend API or an external API
def check_stock(stockItem):
    qty_on_hand = random.randint(1, 200)
    print(f"***Local function running: found {qty_on_hand} {stockItem}")
    return f"found {qty_on_hand} {stockItem}"

# Example dummy function to print the message on the console
# In production, this could be your backend Email server API
def send_instant_message(recipient, message):
    """Mock delivery: log the message to the console and confirm success.

    Returns a confirmation string echoing the message and recipient.
    """
    print("***Local function running: '{}' sent to {}".format(message, recipient))
    return "Instant message: '{}' successfully sent to {}".format(message, recipient)

def run_conversation(model="gpt-3.5-turbo-1106"):
    """Run a tool-calling conversation loop against the Chat Completions API.

    Sends a warehouse-assistant persona and the user's request, then keeps
    executing whatever tool calls the model asks for — feeding each tool's
    output back as a ``role="tool"`` message — until the model returns a
    final answer instead of more tool calls.

    Args:
        model: Chat model name to use for every request. Defaults to the
            model this demo was written against ("gpt-3.5-turbo-1106").

    Returns:
        The model's final assistant message object.
    """
    # Step 1: send the persona, conversation and available functions to the model
    messages = [
        {"role": "system", "content": """You are a warehouse assistant
        - Only answer questions related to warehouse, stock quantity on hand and writing memo.
        - You should only use the functions you have been provided with to answer enquiries.
        - If you're unsure of an answer, you can say 'Sorry, I don't have the information right now'."""
        },
        {"role": "user", "content": "Send the stock levels of iPad and keyboard in an instant message to Peter and remind him to attend tomorrow's meeting."},
    ]
    # fixing the dependencies of parallel functions
        # - Do not execute the send instant message function in parallel with other functions.
        # Send the stock levels of iPad and keyboard in an instant message to Peter and remind him to attend tomorrow's meeting about this.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "check_stock",
                "description": "Get the quantity on hand of a given stock item",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "stockItem": {
                            "type": "string",
                            "description": "The stock item to get the quantity on hand for",
                        },
                    },
                    "required": ["stockItem"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "send_instant_message",
                "description": "Send an instant message to a recipient",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "recipient": {
                            "type": "string",
                            "description": "The recipient of the instant message",
                        },
                        "message": {
                            "type": "string",
                            "description": "The message to send",
                        },
                    },
                    "required": ["recipient", "message"],
                },
            },
        }
    ]

    # Map tool names to local implementations. The JSON-schema property
    # names above match each function's parameter names, so the decoded
    # arguments can be splatted directly with **kwargs.
    available_functions = {
        "check_stock": check_stock,
        "send_instant_message": send_instant_message,
    }

    # Step 2: get the model's first response
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        tools=tools,
        tool_choice="auto",
        temperature=0.0,
    )

    # Step 3: loop until the model stops asking for tool calls
    while response.choices[0].finish_reason == "tool_calls":
        response_message = response.choices[0].message
        print(f"\n\r\n\r{response_message}")
        messages.append(response_message)  # extend conversation with assistant's reply

        # Step 4: execute each requested tool and append its output
        for tool_call in response_message.tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions.get(function_name)
            if function_to_call is None:
                # Model hallucinated an unknown tool name; tell it so.
                function_response = "Function not found"
            else:
                function_args = json.loads(tool_call.function.arguments)
                function_response = function_to_call(**function_args)

            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )  # extend conversation with function response

        # Step 5: get a new response from the model where it can see the function responses
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            tools=tools,
            tool_choice="auto",
            temperature=0.0,
        )
    return response.choices[0].message

# Kick off the demo conversation and print the model's final message.
print(f"\n\r\n\r{run_conversation()}")
2 Likes