I am unbelievably lost, I’m using a combination of so many different posts I’ve seen for this and cannot for the life of me figure out how to get function calling to work with streaming, so far this is what I’ve accomplished:
Streaming with no function calls
Streaming completions that always have function calls
Back to back function calls without streaming
However, I am still lost for how to stream a completion, with and without a function call, and have the entire thing in a loop until there is no more tool calls.
Basically I am trying to get the effect in ChatGPT where it streams the result, and can act upon calling functions as much as it wants with no limit.
Does anybody have an example that I can use that doesn’t have hardcoded function parameters in the tool call section?
This is the code I have currently, it will always return that there is no tool call no matter what, im way past my expertise and ChatGPT can only help so far, here’s the code:
from openai import OpenAI
import openai
import json
import math
from apikey import api_key
openai.api_key = api_key
client = OpenAI(api_key=api_key)
def perform_math(operations, operands_sets):
print("math function is running")
if not isinstance(operations, list) or not isinstance(operands_sets, list):
return json.dumps({"content": "Error: Both operations and operands_sets should be lists."})
if len(operations) != len(operands_sets):
return json.dumps({"content": "Error: Mismatch between number of operations and number of operand sets."})
responses = []
for operation, operands in zip(operations, operands_sets):
if not operands or not all(isinstance(op, (int, float)) for op in operands):
responses.append("Error: Invalid operands provided.")
continue
try:
if operation == "add":
result = sum(operands)
elif operation == "subtract":
result = operands[0] - sum(operands[1:])
elif operation == "multiply":
result = math.prod(operands)
elif operation == "divide":
result = operands[0]
for op in operands[1:]:
result /= op
elif operation == "power":
result = math.pow(operands[0], operands[1])
elif operation == "square_root":
if operands[0] < 0:
raise ValueError("Cannot take the square root of a negative number.")
result = math.sqrt(operands[0])
else:
raise ValueError("Invalid operation specified.")
except (ArithmeticError, ValueError) as e:
responses.append(f"Error in {operation}: {str(e)}")
continue
responses.append(f"{operation.capitalize()} result is {result}.")
final_response = " ".join(responses)
return json.dumps({"content": final_response})
def run_conversation():
messages = [{"role": "user", "content": "What is 90^1.2 and who invented the lightbulb?"}]
tools = [
{
"type": "function",
"function": {
"name": "perform_math",
"description": "Perform multiple math operations. Specify the operations and the sets of numbers to perform them on.",
"parameters": {
"type": "object",
"properties": {
"operations": {
"type": "array",
"items": {
"type": "string",
"enum": ["add", "subtract", "multiply", "divide", "power", "square_root"]
},
"description": "The list of math operations to perform"
},
"operands_sets": {
"type": "array",
"items": {
"type": "array",
"items": {
"type": "number"
}
},
"description": "The list of number sets to perform the operations on. Use decimals and whole numbers only."
}
},
"required": ["operations", "operands_sets"]
}
}
}
]
stream = client.chat.completions.create(
model="gpt-3.5-turbo-0125",
messages=messages,
tools=tools,
tool_choice="auto", # auto is default, but we'll be explicit
stream=True,
)
available_functions = {
"perform_math": perform_math,
} # only one function in this example, but you can have multiple
response_text = ""
tool_calls = []
for chunk in stream:
delta = chunk.choices[0].delta
if delta and delta.content:
# content chunk -- send to browser and record for later saving
print(delta.content)
response_text += delta.content
elif delta and delta.tool_calls:
tcchunklist = delta.tool_calls
for tcchunk in tcchunklist:
if len(tool_calls) <= tcchunk.index:
tool_calls.append({"id": "", "type": "function", "function": { "name": "", "arguments": "" }})
tc = tool_calls[tcchunk.index]
if tcchunk.id:
tc["id"] += tcchunk.id
if tcchunk.function.name:
tc["function"]["name"] += tcchunk.function.name
if tcchunk.function.arguments:
tc["function"]["arguments"] += tcchunk.function.arguments
# Process tool calls if any
if tool_calls:
for tool_call in tool_calls:
# Extract function name for each tool call
function_name = tool_call['function']['name']
# Check if the function exists in the available functions; skip if not found
if function_name in available_functions:
function_to_call = available_functions[function_name]
function_args = json.loads(tool_call['function']['arguments'])
# Attempt to call the function with the provided arguments
try:
function_response = function_to_call(**function_args)
# Construct the response for the tool call
tool_response = {
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": function_response
}
# Append the response to messages
messages.append(tool_response)
except TypeError as e:
print(f"Error calling function {function_name} with args {function_args}: {e}")
else:
print("No tool calls to process.")
stream = client.chat.completions.create(
model="gpt-3.5-turbo-0125",
messages=messages,
stream=True,
)
for chunk in stream:
if chunk.choices[0].delta.content is not None:
print(chunk.choices[0].delta.content, end="", flush=True)
run_conversation()