When my assistant gets the output from a function with long text, it doesn't use that output to respond to the user. Instead, it refers to the output as if the end user could also see the function's response, when it is actually internal. Here's an example:
[
{
"role": "assistant",
"function_call": {
"name": "answer_question",
"arguments": {
"question": "what happened in catalonia during ww2"
}
},
"content": null
},
{
"role": "function",
"name": "answer_question",
"content": "During World War II, Catalonia was part of Spain under the rule of Francisco Franco. Although Spain declared neutrality, Catalonia was affected by the conflict. Italy and Germany had some interest in Catalonia before the war, but their attempts to establish a fascist movement failed. Barcelona was bombed by Italian planes supporting the Nationalist side in the Spanish Civil War. Catalan individuals, such as Joan Pujol and Josep Trueta, played roles in the Allied side, with Pujol acting as a double agent and Trueta organizing medical services. Some Catalans also fought on the Soviet side in the Eastern Front."
},
{
"role": "assistant",
"content": "Thank you for your question about Catalonia during WWII. Now, can you tell me the time period during which World War II took place?"
}
]
Code snippet to reproduce:
import openai
import json
# NOTE(review): hard-coded placeholder key for the repro snippet;
# in real code read it from an environment variable instead.
openai.api_key = "YOUR_API_KEY"
# Example dummy function hard coded to return the same answer to the example question
def answer_question(question):
    """Return a canned answer for the demo question, ignoring *question*.

    Stands in for a real question-answering backend so the
    function-calling flow can be reproduced deterministically.
    """
    return "During World War II, Catalonia was part of Spain under the rule of Francisco Franco. Although Spain declared neutrality, Catalonia was affected by the conflict. Italy and Germany had some interest in Catalonia before the war, but their attempts to establish a fascist movement failed. Barcelona was bombed by Italian planes supporting the Nationalist side in the Spanish Civil War. Catalan individuals, such as Joan Pujol and Josep Trueta, played roles in the Allied side, with Pujol acting as a double agent and Trueta organizing medical services. Some Catalans also fought on the Soviet side in the Eastern Front."
def run_conversation():
    """Run one function-calling round trip with the history-teacher agent.

    Sends the seeded conversation to the model. If the model requests a
    function call, executes it locally, appends the result as a
    ``function``-role message, and asks the model for a second completion
    that should incorporate the function output into its reply.

    Returns:
        The second ChatCompletion response when a function was called,
        otherwise the first response. (The original version fell through
        and returned ``None`` when no function call was requested.)

    Raises:
        ValueError: if the model requests a function name we don't provide.
        json.JSONDecodeError: if the model emits malformed argument JSON.
    """
    messages = [
        {
            "role": "system",
            "content": "I want you to act as a history teacher who has to ask some questions to his students.\nQuestions: \n- When did WWII take place?\n- Which countries were the principal belligerents?\nYour primary focus is asking the provided questions but if the student asks any history questions, you will try to answer them using the provided functions.\n"
        },
        {
            "role": "assistant",
            "content": "Can I ask you some history questions?"
        },
        {
            "role": "user",
            "content": "what happened in Catalonia during WW2"
        }
    ]
    functions = [
        {
            "name": "answer_question",
            "description": "Answers a question from the student that is outside the context of the agent's prompt",
            "parameters": {
                "type": "object",
                "properties": {
                    "question": {
                        "description": "The question the student is asking",
                        "type": "string"
                    }
                },
                "required": ["question"]
            }
        }
    ]
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
        functions=functions,
        function_call="auto",
    )
    response_message = response["choices"][0]["message"]

    # Guard clause: if the model answered directly, return that answer
    # instead of silently returning None (bug in the original version).
    if not response_message.get("function_call"):
        return response

    # Registry of locally callable functions the model may request.
    available_functions = {
        "answer_question": answer_question,
    }
    function_name = response_message["function_call"]["name"]
    function_to_call = available_functions.get(function_name)
    if function_to_call is None:
        # The model can hallucinate function names; fail loudly and clearly
        # rather than with an opaque KeyError.
        raise ValueError(f"Model requested unknown function: {function_name}")

    # Arguments arrive as a JSON string authored by the model.
    function_args = json.loads(response_message["function_call"]["arguments"])
    function_response = function_to_call(
        question=function_args.get("question"),
    )

    # Append the assistant's function-call turn and the function result so
    # the follow-up completion can ground its reply in the output.
    messages.append(response_message)
    messages.append(
        {
            "role": "function",
            "name": function_name,
            "content": function_response,
        }
    )
    second_response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
    )
    return second_response
# Guard the entry point so importing this module does not trigger API calls.
if __name__ == "__main__":
    print(run_conversation())