I am trying to do parallel function calling for a complex use case, but before that I wanted to try a simpler one and see how it works.
I have 2 functions and 2 respective tasks in my prompts (one for each expected function call)…
What I see happening is that only the 1st task results in a function call and the second task is completely ignored. The same happens when I reverse the order of the two tasks in the prompt.
# Environment / client setup: pull the API key out of a local .env file.
import json
import os

import openai
from dotenv import find_dotenv, load_dotenv

load_dotenv(find_dotenv())  # read local .env file into the process env
openai.api_key = os.environ["OPENAI_API_KEY"]
# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
    """Return a canned JSON weather report for *location*.

    Stand-in for a real weather backend: the temperature and forecast are
    fixed; only the echoed location and unit vary with the arguments.
    """
    report = dict(
        location=location,
        temperature="72",
        unit=unit,
        forecast=["sunny", "windy"],
    )
    return json.dumps(report)
def return_a_joke(joke: str):
    """Echo back the joke the model supplied as the function argument."""
    return joke
# JSON-schema style declarations of the callable functions, passed to the
# Chat Completions API so the model can decide when (and how) to call them.
_weather_schema = {
    "name": "get_current_weather",
    "description": "Get the current weather in a given location",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            },
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
    },
}

_joke_schema = {
    "name": "return_a_joke",
    "description": "Get a joke from the LLM and return it",
    "parameters": {
        "type": "object",
        "properties": {
            "joke": {"type": "string", "description": "The joke to be returned"},
        },
        "required": ["joke"],
    },
}

functions = [_weather_schema, _joke_schema]
# Chat history sent to the model.
# BUG FIX: the original used a raw string (r"...\n\"), so the "\n" escapes
# and the trailing line-continuation backslashes were sent to the model as
# literal backslash characters, garbling the prompt. A plain string with
# real newlines states the two tasks cleanly.
messages = [
    {
        "role": "user",
        "content": (
            "Answer 2 questions:\n"
            "1. Tell me a joke about IT.\n"
            "2. Also tell me Boston's weather"
        ),
    }
]
# `openai` is already imported at the top of the file; the duplicate
# import that used to be here was removed.
#
# NOTE(review): with the legacy `functions` argument, gpt-3.5-turbo-0613
# emits at most ONE `function_call` per response — which is why only the
# first task triggers a call and the second is ignored. Parallel function
# calling requires the newer `tools`/`tool_choice` API on a model that
# supports it (gpt-3.5-turbo-1106 or later), plus a loop that appends each
# tool result back into `messages` and re-queries the model.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo-0613",
    messages=messages,
    functions=functions,
)
print(response)