Help with OpenAI Assistant Function Calling Issues

For the past month, I’ve been trying to make the OpenAI Assistant’s function calling work but keep running into an issue that I can’t seem to resolve. Despite following the documentation closely and searching everywhere for similar problems, I’m still stuck. I’ve even found some forum posts addressing similar issues but none have resolved my problem.


What I’m Doing

I’ve built a simple script to test the function API with OpenAI’s Assistant, and I’m using the Yr API for weather data fetching (free and doesn’t require an API key). Here’s a summarized version of the code:

import requests
import random
import time
import json

class YrService:
    """Thin wrapper around the yr_weather Locationforecast client."""

    # Yr's API requires an identifying User-Agent on every request.
    HEADERS = {"User-Agent": "WeatherClient/1.0"}

    def __init__(self):
        """Lazily import and construct the yr_weather client.

        If the library is missing, ``self.client`` stays ``None`` and
        ``get_forecast`` degrades gracefully instead of raising.
        """
        print("Initializing YrService.")
        self.client = None
        try:
            from yr_weather import Locationforecast
            self.client = Locationforecast(headers=self.HEADERS, use_cache=False)
            print("YrService initialized successfully.")
        except ImportError as e:
            print("Failed to import yr_weather. Make sure the library is installed.", e)

    def get_forecast(self, latitude, longitude):
        """Fetch a compact forecast and return it as a plain dict.

        Returns ``None`` when the client is unavailable or the request
        fails.  The forecast object's ``__dict__`` is copied so callers
        cannot mutate the client object's state, and the non-serializable
        ``geometry`` field is stringified so the result can be passed
        straight to ``json.dumps`` (returning the raw ``__dict__`` was the
        original defect).
        """
        if not self.client:
            print("YrService client not initialized.")
            return None

        print(f"Fetching forecast for coordinates: ({latitude}, {longitude})")
        try:
            forecast = self.client.get_forecast(
                lat=latitude, lon=longitude, forecast_type="compact"
            )
            # Copy before modifying; the original __dict__ belongs to the
            # forecast object and must not be altered in place.
            data = forecast.__dict__.copy()
            # geometry is a library object that json.dumps cannot handle.
            if "geometry" in data:
                data["geometry"] = str(data["geometry"])
            print("Raw forecast data:", data)
            return data
        except Exception as e:
            # Best-effort: network/parse errors are reported, not raised.
            print("Error fetching forecast:", e)
            return None


def get_coordinates(location):
    """Mock geocoder: return a random (latitude, longitude) pair for *location*."""
    print(f"Mock geocoding for location: {location}")
    lat = random.uniform(-90, 90)
    lon = random.uniform(-180, 180)
    return lat, lon


class OpenAIService:
    """Minimal raw-HTTP client for the OpenAI Assistants (v2) beta API.

    Talks to the REST endpoints directly with ``requests`` instead of the
    openai SDK so every request and response stays visible.
    """

    def __init__(self, api_key):
        """Store the key and prebuild the headers sent on every request."""
        self.api_key = api_key
        self.base_url = "https://api.openai.com/v1"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            # Required opt-in header for the Assistants v2 beta endpoints.
            "OpenAI-Beta": "assistants=v2",
        }

    def ask_weather(self, thread_id, assistant_id, location):
        """Ask about the weather in *location* and drive the run to completion."""
        print(f"Asking OpenAI about the weather in {location}.")
        message_content = f"What is the weather in {location}?"
        self.add_message_to_thread(thread_id, message_content)
        run_id = self.create_run(thread_id, assistant_id)
        self.poll_and_handle_action(thread_id, run_id, location)

    def create_thread(self):
        """Create an empty thread and return its id."""
        print("Creating a new OpenAI thread.")
        url = f"{self.base_url}/threads"
        response = requests.post(url, headers=self.headers)
        response.raise_for_status()
        thread_id = response.json()["id"]
        print(f"Created thread ID: {thread_id}")
        return thread_id

    def add_message_to_thread(self, thread_id, message_content):
        """Append a user message to the thread."""
        print(f"Adding message to thread ID {thread_id}: {message_content}")
        url = f"{self.base_url}/threads/{thread_id}/messages"
        data = {"role": "user", "content": message_content}
        response = requests.post(url, headers=self.headers, json=data)
        response.raise_for_status()

    def create_run(self, thread_id, assistant_id):
        """Start a run of *assistant_id* on the thread and return the run id."""
        print(f"Creating run for thread ID {thread_id} with assistant ID {assistant_id}.")
        url = f"{self.base_url}/threads/{thread_id}/runs"
        data = {"assistant_id": assistant_id}
        response = requests.post(url, headers=self.headers, json=data)
        response.raise_for_status()
        run_id = response.json()["id"]
        print(f"Created run ID: {run_id}")
        return run_id

    def poll_and_handle_action(self, thread_id, run_id, location):
        """Poll the run until it reaches a terminal state.

        Fix: after servicing ``requires_action`` we keep polling instead of
        breaking out -- the original ``break`` meant the run's final
        ``completed``/``failed`` state (and the assistant's reply) was
        never observed.
        """
        print(f"Polling run ID {run_id} for completion.")
        url = f"{self.base_url}/threads/{thread_id}/runs/{run_id}"
        while True:
            response = requests.get(url, headers=self.headers)
            response.raise_for_status()
            data = response.json()
            status = data["status"]
            print(f"Run status: {status}")

            if status == "requires_action":
                print("Action required. Handling action...")
                self.handle_requires_action(data, thread_id, run_id, location)
            elif status in ["completed", "failed"]:
                print("Run completed or failed.")
                break
            time.sleep(2)

    def handle_requires_action(self, data, thread_id, run_id, location):
        """Execute each requested tool call and submit the outputs."""
        tool_outputs = []
        tool_calls = data.get("required_action", {}).get("submit_tool_outputs", {}).get("tool_calls", [])
        print("Tool calls found:", tool_calls)

        if not tool_calls:
            print("No tool calls available in required_action.")
            return

        for tool in tool_calls:
            print("Processing tool call:", tool)
            if tool["function"]["name"] == "fetch_weather":
                # The model supplies arguments as a JSON string; on a parse
                # failure we simply keep the caller-provided location.
                arguments = tool["function"].get("arguments", "{}")
                try:
                    location_data = json.loads(arguments)
                    location = location_data.get("location", location)
                except json.JSONDecodeError:
                    print("Failed to decode arguments.")

                lat, lon = get_coordinates(location)
                yr_service = YrService()
                weather_data = yr_service.get_forecast(lat, lon)

                if weather_data and "_timeseries" in weather_data:
                    # First timeseries entry is the current hour; pull the two
                    # details the assistant is expected to report.
                    first = weather_data.get("_timeseries", [{}])[0].get("data", {})
                    temperature = first.get("instant", {}).get("details", {}).get("air_temperature", "N/A")
                    rain_probability = first.get("next_1_hours", {}).get("details", {}).get("precipitation_amount", "N/A")
                    weather_output = {
                        "temperature": temperature,
                        "rain_probability": rain_probability,
                    }
                else:
                    print("Weather data is incomplete or missing.")
                    weather_output = {"error": "Weather data unavailable"}

                print(f"Retrieved weather data: {weather_output}")
                tool_outputs.append({"tool_call_id": tool["id"], "output": json.dumps(weather_output)})

        print("Final tool outputs to submit:", tool_outputs)
        self.submit_tool_outputs(thread_id, run_id, tool_outputs)

    def submit_tool_outputs(self, thread_id, run_id, tool_outputs):
        """POST the computed tool outputs back to the run.

        Fix: the endpoint is ``submit_tool_outputs`` (underscores).  The
        hyphenated ``submit-tool-outputs`` spelling is what produced the
        "Invalid URL" error reported with this script.
        """
        if not tool_outputs:
            print("No valid tool outputs to submit.")
            return

        url = f"{self.base_url}/threads/{thread_id}/runs/{run_id}/submit_tool_outputs"
        data = {"tool_outputs": tool_outputs}
        print(f"Submitting tool outputs to URL: {url}")
        print(f"Payload: {json.dumps(data, indent=2)}")
        response = requests.post(url, headers=self.headers, json=data)
        try:
            response.raise_for_status()
            print("Tool outputs submitted successfully.")
        except requests.exceptions.HTTPError:
            print(f"Error submitting tool outputs: {response.text}")
            raise


if __name__ == "__main__":
    # NOTE(review): placeholder credentials -- substitute a real API key and
    # assistant id before running; every call below hits the live API.
    OPENAI_API_KEY = "whatever key here"
    ASSISTANT_ID = "assistant id here"
    LOCATION = "Oslo"  # Example location
    
    print("Initializing OpenAI service.")
    openai_service = OpenAIService(api_key=OPENAI_API_KEY)
    print("Creating new OpenAI thread.")
    # Create a fresh thread, then post the weather question and drive the
    # resulting run (including any tool calls) to completion.
    thread_id = openai_service.create_thread()
    print("Querying OpenAI for weather.")
    openai_service.ask_weather(thread_id, ASSISTANT_ID, LOCATION)

Issue Encountered

When submitting tool outputs, I receive the following error:

{
  "error": {
    "message": "Invalid URL (POST /v1/threads/thread_ad7J64j4Wsyv4Y2mSishv8hS/runs/run_vgRUOGMAV1sMpRWnZAx3PRZf/submit-tool-outputs)",
    "type": "invalid_request_error",
    "param": null,
    "code": null
  }
}

What I’ve Tried

  1. Following the Documentation: Referenced the Assistant API guide step-by-step.
  2. Adjusting API Requests: Experimented with including/excluding requestId in the URL.
  3. Using Chat Completions API: This works perfectly, but I need the Assistant API for my application.
  4. Tried with Minimal Examples: Created a simplified test file to isolate the issue.
  5. Referencing Other Forum Posts: Encountered similar reports, but no definitive solutions.

Observations

  • Polling Behavior: The run remains in the “requires_action” state indefinitely when submitting JSON.
  • Plain Text Submission: Submitting plain text outputs results in the same error as above.
  • Yr API Integration: Weather data fetching itself works fine, and the output is as expected (mock geocoding + weather details).
  • Error Consistency: Regardless of request modifications, the error persists.

Question

Does anyone know the proper way to submit function outputs to the Assistant API?
Is there a parameter I might be missing or something critical I’m overlooking?

I’ve attached the test script (test.py) and included the Yr API for reference. If you want to replicate, you only need:

  • OpenAI API key (for all tests)

Playground Behavior

In the OpenAI playground, submitting JSON outputs keeps the run going indefinitely until canceled. Submitting outputs as plain text results in an error, as shown in the attached screenshot:


Help Needed

I’d greatly appreciate guidance on resolving this issue. If you’ve encountered something similar, please share any insights. Let me know if you need more details or logs.

Thanks in advance!

Is there a special reason you are not using the OpenAI library? That would make things much easier, I think.

I’m not a python developer - did mine in php. However, a couple tips:

1. OpenAI Library: As @jlvanhulst suggests, the OpenAI Library is highly recommended by most. I chose not to use it, because I wanted to understand everything coded and how it works, while also minimizing extraneous / unnecessary code.

2. Ask o1 Pro & Claude: Whenever I hit persistent problems, I ask OpenAI and it usually gets it. Ironically, OpenAI couldn’t solve one issue for me. I tried Claude and it actually solved it. I am not advocating Claude, as OpenAI is definitely my preferred solution. But just like humans, sometimes an outsider perspective is needed. :wink:

Good luck!

1 Like

I know it's easier, but the library lacks the methods for manipulating threads and runs by specific IDs, and that part of the logic is needed elsewhere in the application.

I discovered that the endpoint for submitting tool outputs should be submit_tool_outputs (underscores instead of hyphens). This resolves the previous Invalid URL error. However, now I am facing a new problem:

After submitting tool outputs, I never receive a response back from the assistant. The code seems to execute without any errors, but the assistant does not provide any replies in the thread.


Observations

  1. Tool Outputs Submission: Submitting tool outputs with the corrected endpoint (submit_tool_outputs ) works without errors.
  2. No Assistant Response: After submission, the assistant does not generate a response.
  3. Thread Messages: Even when polling for messages in the thread, no new assistant responses appear.

Question

Does anyone know why the assistant might not be generating responses after tool outputs are submitted? Is there something additional required to prompt a reply?

i added i the system prompts clear instructions, also here is the output:

Submitting tool outputs to URL: https://api.openai.com/v1/threads/thread_0PvurCxaqvprYMWjUoVriyay/runs/run_xDF75fNfpqLikkhrsFxbxTAl/submit_tool_outputs
Payload: {
“tool_outputs”: [
{
“tool_call_id”: “call_luL3FE6gG5jrsP0kNFaETwE5”,
“output”: “temperature: -21.4, rain_probability: 0.0”
}
]
}
Tool outputs submitted successfully.
Checking for assistant response in thread ID thread_0PvurCxaqvprYMWjUoVriyay.
No assistant responses found.
Run status: completed
Run completed.
Checking for assistant response in thread ID thread_0PvurCxaqvprYMWjUoVriyay.
No assistant responses found.

You will need to poll until there is output?

1 Like

I am polling till the run is complete, then I poll again; the output is truncated for brevity.
I’ve also noticed that recent changes sometimes cause threads not to return a previous message after completion of a run. I’ve found a solution for this and will run it later today and post the results.

Fix for OpenAI Weather Assistant Issue

This fix addresses the issue with handling OpenAI Assistants API for weather-related queries. The updated code now works flawlessly and adheres to the system prompt and tool definitions. Here’s the complete solution:


Updated Python Code

import requests
import random
import time
import json

class YrService:
    """Small facade over the yr_weather Locationforecast client."""

    # Yr requires a descriptive User-Agent on all requests.
    HEADERS = {"User-Agent": "WeatherClient/1.0"}

    def __init__(self):
        """Try to build the yr_weather client; leave it as None if unavailable."""
        print("Initializing YrService.")
        self.client = None
        try:
            from yr_weather import Locationforecast
            self.client = Locationforecast(headers=self.HEADERS, use_cache=False)
            print("YrService initialized successfully.")
        except ImportError as e:
            print("Failed to import yr_weather. Make sure the library is installed.", e)

    def get_forecast(self, latitude, longitude):
        """Return a compact forecast as a serializable dict, or None on failure."""
        if not self.client:
            print("YrService client not initialized.")
            return None

        print(f"Fetching forecast for coordinates: ({latitude}, {longitude})")
        try:
            result = self.client.get_forecast(
                lat=latitude, lon=longitude, forecast_type="compact"
            )
            # Work on a copy so the forecast object itself is left untouched.
            payload = dict(result.__dict__)
            # The geometry field is not JSON-serializable; store its repr.
            if "geometry" in payload:
                payload["geometry"] = str(payload["geometry"])
            print("Raw forecast data:", payload)
            return payload
        except Exception as err:
            print("Error fetching forecast:", err)
            return None

def get_coordinates(location):
    """Stand-in geocoder that yields a random (lat, lon) for *location*."""
    print(f"Mock geocoding for location: {location}")
    latitude = random.uniform(-90, 90)
    longitude = random.uniform(-180, 180)
    return latitude, longitude

class OpenAIService:
    """Raw-HTTP client for the OpenAI Assistants (v2) beta API.

    Uses ``requests`` against the REST endpoints directly (no SDK) so the
    thread and run ids remain fully visible to the caller.
    """

    def __init__(self, api_key):
        """Store the key and prebuild the headers sent on every request."""
        self.api_key = api_key
        self.base_url = "https://api.openai.com/v1"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            # Required opt-in header for the Assistants v2 beta endpoints.
            "OpenAI-Beta": "assistants=v2",
        }

    def ask_weather(self, thread_id, assistant_id, location):
        """Post a weather question for *location* and drive the run to completion."""
        print(f"Asking OpenAI about the weather in {location}.")
        message_content = f"What is the weather in {location}?"
        self.add_message_to_thread(thread_id, message_content)
        run_id = self.create_run(thread_id, assistant_id)
        self.poll_and_handle_action(thread_id, run_id, location)

    def create_thread(self):
        """Create an empty thread, logging the raw response, and return its id."""
        print("Creating a new OpenAI thread.")
        url = f"{self.base_url}/threads"
        response = requests.post(url, headers=self.headers)
        print(f"Response Status Code: {response.status_code}")
        print(f"Response Body: {response.text}")
        response.raise_for_status()
        thread_id = response.json()["id"]
        print(f"Created thread ID: {thread_id}")
        return thread_id

    def add_message_to_thread(self, thread_id, message_content):
        """Append a user message to the thread."""
        print(f"Adding message to thread ID {thread_id}: {message_content}")
        url = f"{self.base_url}/threads/{thread_id}/messages"
        data = {"role": "user", "content": message_content}
        response = requests.post(url, headers=self.headers, json=data)
        response.raise_for_status()

    def create_run(self, thread_id, assistant_id):
        """Start a run of *assistant_id* on the thread and return the run id."""
        print(f"Creating run for thread ID {thread_id} with assistant ID {assistant_id}.")
        url = f"{self.base_url}/threads/{thread_id}/runs"
        data = {"assistant_id": assistant_id}
        response = requests.post(url, headers=self.headers, json=data)
        response.raise_for_status()
        run_id = response.json()["id"]
        print(f"Created run ID: {run_id}")
        return run_id

    def poll_and_handle_action(self, thread_id, run_id, location):
        """Poll the run, service tool calls, and surface the final reply."""
        print(f"Polling run ID {run_id} for completion.")
        url = f"{self.base_url}/threads/{thread_id}/runs/{run_id}"
        while True:
            response = requests.get(url, headers=self.headers)
            response.raise_for_status()
            data = response.json()
            status = data["status"]
            print(f"Run status: {status}")

            if status == "completed":
                print("Run completed.")
                # Prefer replies discovered via the run-steps listing; the
                # list-messages endpoint occasionally lags behind completion.
                ephemeral_responses = self.get_ephemeral_assistant_messages(thread_id, run_id)
                if ephemeral_responses:
                    print("[Ephemeral Assistant Replies]")
                    for reply in ephemeral_responses:
                        print(f"Assistant ephemeral reply: {reply}")
                else:
                    # If no ephemeral messages, fall back to the run payload.
                    self.display_response(data)
                break

            elif status == "requires_action":
                print("Action required. Handling action...")
                self.handle_requires_action(data, thread_id, run_id, location)

            elif status == "failed":
                print("Run failed.")
                break

            time.sleep(2)

        # Also check for any new assistant messages after tool outputs.
        self.check_for_assistant_response(thread_id)

    def display_response(self, data):
        """Fallback printer for assistant messages embedded in *data*.

        NOTE(review): run objects returned by the runs endpoint do not
        appear to carry a "messages" key, so this loop is likely a no-op.
        Kept as a defensive fallback -- confirm against the API reference.
        """
        messages = data.get("messages", [])
        for message in messages:
            if message.get("role") == "assistant":
                print(f"Assistant response: {message.get('content')}")

    def handle_requires_action(self, data, thread_id, run_id, location):
        """Execute each requested tool call and submit the outputs."""
        tool_outputs = []
        tool_calls = data.get("required_action", {}).get("submit_tool_outputs", {}).get("tool_calls", [])
        print("Tool calls found:", tool_calls)

        if not tool_calls:
            print("No tool calls available in required_action.")
            return

        for tool in tool_calls:
            print("Processing tool call:", tool)
            if tool["function"]["name"] == "fetch_weather":
                # The model supplies arguments as a JSON string.
                arguments = tool["function"].get("arguments", "{}")
                try:
                    location_data = json.loads(arguments)
                    location = location_data.get("location", "Unknown location")
                except json.JSONDecodeError:
                    print("Failed to decode arguments.")
                    location = "Unknown location"

                lat, lon = get_coordinates(location)
                yr_service = YrService()
                weather_data = yr_service.get_forecast(lat, lon)

                if weather_data and "_timeseries" in weather_data:
                    # First timeseries entry is the current hour.
                    first = weather_data.get("_timeseries", [{}])[0].get("data", {})
                    temperature = (
                        first.get("instant", {}).get("details", {}).get("air_temperature", "N/A")
                    )
                    rain_probability = (
                        first.get("next_1_hours", {}).get("details", {}).get("precipitation_amount", "N/A")
                    )

                    # Plain text keeps the submit payload simple for the model.
                    weather_output = f"temperature: {temperature}, rain_probability: {rain_probability}"
                else:
                    print("Weather data is incomplete or missing.")
                    weather_output = "error: Weather data unavailable"

                print(f"Retrieved weather data: {weather_output}")
                tool_outputs.append({"tool_call_id": tool["id"], "output": weather_output})

        print("Final tool outputs to submit:", tool_outputs)
        self.submit_tool_outputs(thread_id, run_id, tool_outputs)

    def submit_tool_outputs(self, thread_id, run_id, tool_outputs):
        """POST tool outputs back to the run (endpoint uses underscores)."""
        if not tool_outputs:
            print("No valid tool outputs to submit.")
            return

        url = f"{self.base_url}/threads/{thread_id}/runs/{run_id}/submit_tool_outputs"
        data = {"tool_outputs": tool_outputs}
        print(f"Submitting tool outputs to URL: {url}")
        print(f"Payload: {json.dumps(data, indent=2)}")
        response = requests.post(url, headers=self.headers, json=data)
        try:
            response.raise_for_status()
            print("Tool outputs submitted successfully.")
            # The assistant's reply is picked up by the final poll loop.
        except requests.exceptions.HTTPError:
            print(f"Error submitting tool outputs: {response.text}")
            raise

    def check_for_assistant_response(self, thread_id):
        """Print every assistant message currently in the thread.

        Fix: the list-messages endpoint returns the message array under the
        "data" key (exactly as the run-steps listing below does), not under
        "messages" -- reading "messages" always produced an empty list,
        which is why "No assistant responses found." was logged even after
        a completed run.  Message content is a list of blocks, so it is
        flattened to text via ``extract_text``.
        """
        print(f"Checking for assistant response in thread ID {thread_id}.")
        url = f"{self.base_url}/threads/{thread_id}/messages"
        response = requests.get(url, headers=self.headers)
        response.raise_for_status()
        messages = response.json().get("data", [])

        assistant_responses = [
            self.extract_text(message.get("content", []))
            for message in messages
            if message.get("role") == "assistant"
        ]
        if assistant_responses:
            print("Assistant responses found:")
            for resp in assistant_responses:
                print(f"Assistant response: {resp}")
        else:
            print("No assistant responses found.")

    # ============= EPHEMERAL HELPERS =============
    def get_ephemeral_assistant_messages(self, thread_id, run_id):
        """
        1) Retrieve run steps
        2) For each 'message_creation' step that leads to an 'assistant' message,
           fetch ephemeral content
        3) Return all ephemeral replies
        """
        ephemeral_replies = []
        steps_url = f"{self.base_url}/threads/{thread_id}/runs/{run_id}/steps"
        steps_resp = requests.get(steps_url, headers=self.headers)
        steps_resp.raise_for_status()
        steps_list = steps_resp.json().get("data", [])

        for step in steps_list:
            if step.get("type") == "message_creation":
                msg_id = step.get("step_details", {}).get("message_creation", {}).get("message_id")
                if msg_id:
                    ephemeral_text = self.retrieve_ephemeral_message(thread_id, msg_id)
                    if ephemeral_text:
                        ephemeral_replies.append(ephemeral_text)
        return ephemeral_replies

    def retrieve_ephemeral_message(self, thread_id, message_id):
        """Fetch a single message by id; return its text if it is from the assistant."""
        url = f"{self.base_url}/threads/{thread_id}/messages/{message_id}"
        resp = requests.get(url, headers=self.headers)
        resp.raise_for_status()
        msg_dict = resp.json()

        if msg_dict.get("role") == "assistant":
            return self.extract_text(msg_dict.get("content", []))
        return None

    def extract_text(self, content_blocks):
        """Flatten a message's content-block list into a single text string."""
        parts = []
        for block in content_blocks:
            if block.get("type") == "text":
                text_val = block.get("text", {}).get("value", "")
                parts.append(text_val)
        return " ".join(parts).strip()

if __name__ == "__main__":
    # NOTE(review): placeholder credentials -- substitute a real API key and
    # assistant id before running; every call below hits the live API.
    OPENAI_API_KEY = "your api key"
    ASSISTANT_ID = "your assistant id"
    LOCATION = "Oslo"  # Example location

    print("Initializing OpenAI service.")
    openai_service = OpenAIService(api_key=OPENAI_API_KEY)
    print("Creating new OpenAI thread.")
    # Create a fresh thread, then post the weather question and drive the
    # resulting run (including any tool calls) to completion.
    thread_id = openai_service.create_thread()
    print("Querying OpenAI for weather.")
    openai_service.ask_weather(thread_id, ASSISTANT_ID, LOCATION)

Assumptions

System Prompt

The system prompt assumes the following instructions:

  1. After receiving tool outputs, you must generate a response based on the data fetched and share it with the user in a clear and concise manner, adhering to the same language as the user query.

  2. If asked about the weather, include the temperature and rain probability in the response. For example:
    “The temperature in [Location] is [temperature]°C with a [rain_probability]% chance of rain.”

Tool Definition

The function definition for fetch_weather used is:

{
  "name": "fetch_weather",
  "description": "Fetches weather information for a given location.",
  "strict": false,
  "parameters": {
    "type": "object",
    "properties": {
      "location": {
        "type": "string",
        "description": "Name of the location (e.g., 'New York')."
      }
    },
    "required": ["location"]
  }
}
1 Like

If someone has a better implementation maybe even using the library he could share please do. Keep in mind to return also the run and thread ids,