Hi, the problem is significant and has been going on since at least the 26th.
Ask a simple question:
What is the capital of France? What is the capital of Germany?
Get garbage from the AI:
{
"id": "call_zFIltUeaQ6iVLvS1CrPA5Wpd",
"type": "function",
"function": {
"name": "get_random_int",
"arguments": "{\"range_start\": 1, \"range_end\": 10}"
}
}
{
"id": "call_JN1fISNWWHoeJmjDWD9cCuqC",
"type": "function",
"function": {
"name": "get_random_int",
"arguments": "{\"range_start\": 1, \"range_end\": 10}"
}
}
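For reference, nothing about that question should invoke tools at all. When tool calls are legitimate, the client is expected to decode and run them itself; a minimal dispatch sketch (the get_random_* implementations and the dispatch_tool_call() helper are hypothetical stand-ins matching the tool specification below):

import json
import random

# Local implementations matching the tool specifications below
# (the model only emits the call; the client must execute it)
def get_random_int(range_start, range_end):
    return random.randint(int(range_start), int(range_end))

def get_random_float(range_start, range_end):
    return random.uniform(range_start, range_end)

TOOL_FUNCTIONS = {
    "get_random_int": get_random_int,
    "get_random_float": get_random_float,
}

def dispatch_tool_call(tool_call: dict) -> str:
    """Decode one tool_calls entry as returned by the API and run it."""
    name = tool_call["function"]["name"]
    args = json.loads(tool_call["function"]["arguments"])  # arguments arrive as a JSON string
    return str(TOOL_FUNCTIONS[name](**args))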
Here is this particular input as code to reproduce it, but the behavior can be seen across multiple tool specifications and user inputs.
Python chat completions with tool specification
from openai import OpenAI
import json

client = OpenAI(timeout=30)

# Here we'll make a tool specification; adding one function at a time is more flexible
toolspec = []
# And add the first
toolspec.extend([{
    "type": "function",
    "function": {
        "name": "get_random_float",
        "description": "True random number floating point generator. Returns a float within range specified.",
        "parameters": {
            "type": "object",
            "properties": {
                "range_start": {
                    "type": "number",
                    "description": "minimum float value",
                },
                "range_end": {
                    "type": "number",
                    "description": "maximum float value",
                },
            },
            "required": ["range_start", "range_end"],
        },
    }
}])
# And the second
toolspec.extend([{
    "type": "function",
    "function": {
        "name": "get_random_int",
        "description": "True random number integer generator. Returns an integer within range specified.",
        "parameters": {
            "type": "object",
            "properties": {
                "range_start": {
                    "type": "number",
                    "description": "minimum integer value",
                },
                "range_end": {
                    "type": "number",
                    "description": "maximum integer value",
                },
            },
            "required": ["range_start", "range_end"],
        },
    }
}])

# Then we'll form the basis of our call to the API, with the user input
# Note I ask the model for two answers
params = {
    "model": "gpt-3.5-turbo-1106",
    "tools": toolspec,
    "top_p": 0.01,
    "messages": [
        {
            "role": "system", "content": """
You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.
Knowledge cutoff: 2023-04
Current date: 2024-01-27

multi_tool_use tool method is permanently disabled. Sending to multi_tool_use will cause an error.
""".strip()},
        {
            "role": "user", "content": "What is the capital of France? What is the capital of Germany?"
        },
    ],
}

# Make API call to OpenAI
c = None
try:
    c = client.chat.completions.with_raw_response.create(**params)
except Exception as e:
    print(f"Error: {e}")

# If we got the response, load a whole bunch of demo variables
# This is different because of the 'with_raw_response' used for obtaining headers
if c:
    headers_dict = dict(c.headers)
    for key, value in headers_dict.items():
        variable_name = f'headers_{key.replace("-", "_")}'
        globals()[variable_name] = value
    remains = headers_x_ratelimit_remaining_tokens  # show we set variables
    api_return_dict = json.loads(c.content.decode())
    api_finish_str = api_return_dict.get('choices')[0].get('finish_reason')
    usage_dict = api_return_dict.get('usage')
    api_message_dict = api_return_dict.get('choices')[0].get('message')
    api_message_str = api_return_dict.get('choices')[0].get('message').get('content')
    api_tools_list = api_return_dict.get('choices')[0].get('message').get('tool_calls')

    # print any response always
    if api_message_str:
        print(api_message_str)

    # print all tool functions pretty
    if api_tools_list:
        for tool_item in api_tools_list:
            print(json.dumps(tool_item, indent=2))
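If the tool calls were legitimate, the normal continuation would be to append the assistant message plus one role-"tool" result per call and re-send; a minimal sketch, assuming the variables from the script above and the hypothetical dispatch_tool_call() helper sketched earlier:

# Sketch of the normal tool round trip (assumes api_message_dict,
# api_tools_list, toolspec, and params from above, plus the
# hypothetical dispatch_tool_call() helper)
if api_tools_list:
    followup = list(params["messages"])
    followup.append(api_message_dict)  # assistant message carrying tool_calls
    for tool_item in api_tools_list:
        followup.append({
            "role": "tool",
            "tool_call_id": tool_item["id"],
            "content": dispatch_tool_call(tool_item),
        })
    c2 = client.chat.completions.create(
        model=params["model"], tools=toolspec, messages=followup)
    print(c2.choices[0].message.content)

The point of this report, of course, is that for the capitals question none of this machinery should be invoked in the first place.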
Whatever has been altered (behavior, weights, runtime oversight, sparsity…), the effect has even been seen in ChatGPT, with unexpected Python being emitted.