How Can I use threadID, assistants, and function tool calling at the same time?

Hi all, I am trying to create an AI chatbot (using the assistant’s API that answers user questions (so it would be a continuous conversation). The chatbot should call API requests if the user asks for something specific and continue the conversation with what it received from the API call. My understanding is that to keep a continuous conversation going, I need to create a thread id for the conversation (I have actually done this in the second code snippet but without the function tools calling features). However, I’m having trouble integrating function tools calling with threadIDs at the same time. I can do each separately but I’m having trouble combining both functionalities. In the code below I want the user to get answers about my restaurant and when they ask about reservation times or weather, I would use external APIs to answer them. Here’s my code:

require('dotenv').config(); // This should be at the top of your file

const { OpenAI } = require('openai');
const readline = require('readline');
// const { threadId } = require('worker_threads');
const openai = new OpenAI(process.env.OPENAI_API_KEY);

// Example dummy function hard coded to return the same weather
// In production, this could be your backend API or an external API
function getCurrentWeather(location) {
    if (location.toLowerCase().includes("tokyo")) {
      return JSON.stringify({ location: "Tokyo", temperature: "10", unit: "celsius" });
    } else if (location.toLowerCase().includes("san francisco")) {
      return JSON.stringify({ location: "San Francisco", temperature: "72", unit: "fahrenheit" });
    } else if (location.toLowerCase().includes("paris")) {
      return JSON.stringify({ location: "Paris", temperature: "22", unit: "fahrenheit" });
    } else {
      return JSON.stringify({ location, temperature: "unknown" });
    }
  }

function get_table_reservations(bookingTime, numGuests) {
  if (bookingTime.toLowerCase().includes("4:30")) {
    return JSON.stringify({ availability: "Not available"});
  }
  else if (!bookingTime) {
    return JSON.stringify({ availability: "Please include a booking time"});
  }
  else {
    return JSON.stringify({ availability: "Available", forGuests: numGuests});
}
}

const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
});

const availableFunctions = {
  get_current_weather: getCurrentWeather,
  get_table_reservations: get_table_reservations
};

async function askQuestion(query) {
  return new Promise((resolve) => rl.question(query, resolve));
}
let threadId = '';

async function sendQuestion(userQuestion) {
    let messages = []; // Initialize messages array
    const assistantId = 'MY_ASSISTANT_ID'; 


    if (threadId.trim().length === 0) {
        // ThreadId is empty
        console.log("ThreadId is empty");
        const thread = await openai.beta.threads.create();
        threadId = thread.id;
    } else {
        // ThreadId is not empty
        console.log("ThreadId is not empty");
    }

    messages.push({ role: "user", content: userQuestion });

    const tools = [
        {
          type: "function",
          function: {
            name: "get_current_weather",
            description: "Get the current weather in a given location",
            parameters: {
              type: "object",
              properties: {
                location: {
                  type: "string",
                  description: "The city and state, e.g. San Francisco, CA",
                },
                unit: { type: "string", enum: ["celsius", "fahrenheit"] },
              },
              required: ["location"],
            },
          },
        },
        {
          type: "function",
          function: {
            name: "get_table_reservations",
            description: "Tell the user if a table is available for the number of guests and time they request",
            parameters: {
              type: "object",
              properties: {
                numGuests: {
                  type: "integer",
                  description: "The number of guests",
                },
                bookingTime: { type: "string", description: "The time requested for a reservation, eg. 8:30 PM" },
              },
              required: ["numGuests", "bookingTime"],
            },
          },
        },
      ];

    const response = await openai.chat.completions.create(threadId, {
      model: "gpt-3.5-turbo-1106",
      messages: messages,
      tools: tools,
      tool_choice: "auto",
    });

    const responseMessage = response.choices[0].message;


  // Step 2: check if the model wanted to call a function
  const toolCalls = responseMessage.tool_calls;
  if (responseMessage.tool_calls) {
    // Step 3: call the function
    // Note: the JSON response may not always be valid; be sure to handle errors
    const availableFunctions = {
      get_current_weather: getCurrentWeather,
      get_table_reservations: get_table_reservations
    }; // only one function in this example, but you can have multiple
    messages.push(responseMessage); // extend conversation with assistant's reply
    for (const toolCall of toolCalls) {
      const functionName = toolCall.function.name;
      const functionToCall = availableFunctions[functionName];
      const functionArgs = JSON.parse(toolCall.function.arguments);
      console.log('Arguments:', toolCall.function.arguments, 'name:', functionName); // Add this line to debug
      const functionResponse = functionToCall(
        functionArgs.bookingTime,
        functionArgs.numGuests
      );
      messages.push({
        tool_call_id: toolCall.id,
        role: "tool",
        name: functionName,
        content: functionResponse,
      }); // extend conversation with function response
    }
    const secondResponse = await openai.chat.completions.create({
      model: "gpt-3.5-turbo-1106",
      messages: messages,
    }); 
    // get a new response from the model where it can see the function response
    // console.log(secondResponse.choices);
    return secondResponse.choices;
  }

        //   // Regardless of tool calls, always add the latest model response to the conversation history
        //   try{
        //   messages.push({ role: "assistant", content: responseMessage.content });
        // } catch (error) {
        //   console.error("Error:", error); // Log any errors that occur during the process
        //   break; // Optionally break the loop on error
        // }
}

async function runConversation() {

  while (true) { // Loop indefinitely
    const userInput = await askQuestion("You: "); // Get user input
    if (userInput.toLowerCase() === "exit") break; // Break loop if user types "exit"

    responseToUser = await sendQuestion(userInput);
    console.log(responseToUser);

  }

  rl.close();

}



runConversation().catch(console.error);


Unfortunately, running this code is giving me the following error:

PS C:\Users\matth\Documents\Flutter App Development\TeeScanFire\AI Chatbot> node testServer2.js
You: can I reserve for 4:30
ThreadId is empty
BadRequestError: 400 We could not parse the JSON body of your request. (HINT: This likely means you aren't using your HTTP library correctly. The OpenAI API expects a JSON payload, but what was sent was not valid JSON. If you have trouble figuring out how to fix this, please contact us through our help center at help.openai.com.)
    at Function.generate (C:\Users\matth\Documents\Flutter App Development\TeeScanFire\AI Chatbot\node_modules\openai\error.js:44:20)
    at OpenAI.makeStatusError (C:\Users\matth\Documents\Flutter App Development\TeeScanFire\AI Chatbot\node_modules\openai\core.js:251:33)
    at OpenAI.makeRequest (C:\Users\matth\Documents\Flutter App Development\TeeScanFire\AI Chatbot\node_modules\openai\core.js:290:30)
    at processTicksAndRejections (node:internal/process/task_queues:96:5)
    at async sendQuestion (C:\Users\matth\Documents\Flutter App Development\TeeScanFire\AI Chatbot\testServer2.js:105:22)
    at async runConversation (C:\Users\matth\Documents\Flutter App Development\TeeScanFire\AI Chatbot\testServer2.js:165:22) {
  status: 400,
  headers: {
    'alt-svc': 'h3=":443"; ma=86400',
    'cf-cache-status': 'DYNAMIC',
    'cf-ray': '85ceb92c3bd2d009-SJC',
    connection: 'keep-alive',
    'content-length': '443',
    'content-type': 'application/json; charset=utf-8',
    date: 'Thu, 29 Feb 2024 06:06:47 GMT',
    server: 'cloudflare',
    'set-cookie': '__cf_bm=P8i0UPrV4201jrZBTTOvekbdepGaVsshbT2MtzNdlIo-1709186807-1.0-AVyhhk61lcloscOBj+B0q2g9oteGwLgYbEo+Lcc4jnpffkYUTOQfFR3jE1akiVMRCIotqKxWa1UfhSEIuHECLQg=; path=/; expires=Thu, 29-Feb-24 06:36:47 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=HxVe63Z7Datr6wjqCCLFLT1AM.pq1n9NA5RMjWYSMc8-1709186807758-0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',
    'strict-transport-security': 'max-age=15724800; includeSubDomains',
    vary: 'Origin',
    'x-request-id': 'req_5dfb3c0df286439a83c2c7a6d435a75a'
  },
  error: {
    message: "We could not parse the JSON body of your request. (HINT: This likely means you aren't using your HTTP library correctly. The OpenAI API expects a JSON payload, but what was sent was not valid JSON. If you have trouble figuring out how to fix this, please contact us through our help center at help.openai.com.)",
    type: 'invalid_request_error',
    param: null,
    code: null
  },
  code: null,
  param: null,
  type: 'invalid_request_error'
}

The thing is I can create a chatbot that works with threadIDs and has a continual conversation with the user just fine but when I try to combine both is when I get to a problem. Here’s the code that works fine for me (receives and sends responses to frontend):

const express = require('express');
const { OpenAI } = require('openai');
const cors = require('cors');
require('dotenv').config();

const app = express();
app.use(cors());
app.use(express.json());

const openai = new OpenAI(process.env.OPENAI_API_KEY);

app.post('/get-response', async (req, res) => {
    const userMessage = req.body.message;
    let threadId = req.body.threadId; // Receive threadId from the client
    const assistantId = 'MY_ASSISTANT_ID'; // Replace with your actual assistant ID

    // If no threadId or it's a new session, create a new thread
    if (!threadId) {
        const thread = await openai.beta.threads.create();
        threadId = thread.id;
    }

    await openai.beta.threads.messages.create(threadId, {
        role: "user",
        content: userMessage,
    });


    // Use runs to wait for the assistant response and then retrieve it
    const run = await openai.beta.threads.runs.create(threadId, {
        assistant_id: assistantId,
    });

    let runStatus = await openai.beta.threads.runs.retrieve(
        threadId,
        run.id
      );

      // Polling mechanism to see if runStatus is completed
      // This should be made more robust.
      while (runStatus.status !== "completed") {
        await new Promise((resolve) => setTimeout(resolve, 2000));
        runStatus = await openai.beta.threads.runs.retrieve(threadId, run.id);
      }


  //     //CHECKING FOR TABLE RESERVATION:
  //         // If the model output includes a function call
  //   if (runStatus.status === 'requires_action') {
  //     // You might receive an array of actions, iterate over it
  //     for (const action of runStatus.required_action.submit_tool_outputs.tool_calls) {
  //         const functionName = action.function.name;
  //         const arguments = JSON.parse(action.function.arguments);
          
  //         // Check if the function name matches 'table_reservation'
  //         if (functionName === 'table_reservation') {
  //             handleTableReservation(arguments);
  //             // Respond back to the model that the action has been handled
  //             await openai.beta.threads.runs.submit_tool_outputs(threadId, run.id, {
  //                 tool_outputs: [{
  //                     tool_call_id: action.id,
  //                     output: { success: true } // You can include more details if needed
  //                 }]
  //             });
  //         }
  //     }
  // }


      // Get the last assistant message from the messages array
      const messages = await openai.beta.threads.messages.list(threadId);

      // Find the last message for the current run
      const lastMessageForRun = messages.data
        .filter(
          (message) => message.run_id === run.id && message.role === "assistant"
        )
        .pop();

      // If an assistant message is found, console.log() it
      assistantMessage = ""
      if (lastMessageForRun) {
        assistantMessage = lastMessageForRun.content[0].text.value
        console.log(`${assistantMessage} \n`);
      }
    
    res.json({ message: assistantMessage, threadId: threadId });
});

const PORT = 3001;
app.listen(PORT, () => console.log(`Server listening on port ${PORT}`));

As I was previously saying, I am able to create function tools calling with one of my assistants but not with threadId’s involved. Here’s the code that works just fine for doing that:

require('dotenv').config(); // This should be at the top of your file

const { OpenAI } = require('openai');
const openai = new OpenAI(process.env.OPENAI_API_KEY);


// Example dummy function hard coded to return the same weather
// In production, this could be your backend API or an external API
function getCurrentWeather(location) {
  if (location.toLowerCase().includes("tokyo")) {
    return JSON.stringify({ location: "Tokyo", temperature: "10", unit: "celsius" });
  } else if (location.toLowerCase().includes("san francisco")) {
    return JSON.stringify({ location: "San Francisco", temperature: "72", unit: "fahrenheit" });
  } else if (location.toLowerCase().includes("paris")) {
    return JSON.stringify({ location: "Paris", temperature: "22", unit: "fahrenheit" });
  } else {
    return JSON.stringify({ location, temperature: "unknown" });
  }
}

function get_table_reservations(bookingTime, numGuests) {
  if (bookingTime.toLowerCase().includes("4:30")) {
    return JSON.stringify({ availability: "Not available"});
  }
  else if (!bookingTime) {
    return JSON.stringify({ availability: "Please include a booking time"});
  }
  else {
    return JSON.stringify({ availability: "Available", forGuests: numGuests});
}
}


async function runConversation() {
  // Step 1: send the conversation and available functions to the model
  const messages = [
    { role: "user", content: "I want a table reservation for 3 people." },
  ];
  const tools = [
    {
      type: "function",
      function: {
        name: "get_current_weather",
        description: "Get the current weather in a given location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The city and state, e.g. San Francisco, CA",
            },
            unit: { type: "string", enum: ["celsius", "fahrenheit"] },
          },
          required: ["location"],
        },
      },
    },
    {
      type: "function",
      function: {
        name: "get_table_reservations",
        description: "Tell the user if a table is available for the number of guests and time they request",
        parameters: {
          type: "object",
          properties: {
            numGuests: {
              type: "integer",
              description: "The number of guests",
            },
            bookingTime: { type: "string", description: "The time requested for a reservation, eg. 8:30 PM" },
          },
          required: ["numGuests", "bookingTime"],
        },
      },
    },
  ];


  const response = await openai.chat.completions.create({
    model: "gpt-3.5-turbo-1106",
    messages: messages,
    tools: tools,
    tool_choice: "auto", // auto is default, but we'll be explicit
  });
  const responseMessage = response.choices[0].message;

  // Step 2: check if the model wanted to call a function
  const toolCalls = responseMessage.tool_calls;
  if (responseMessage.tool_calls) {
    // Step 3: call the function
    // Note: the JSON response may not always be valid; be sure to handle errors
    const availableFunctions = {
      get_current_weather: getCurrentWeather,
      get_table_reservations: get_table_reservations
    }; // only one function in this example, but you can have multiple
    messages.push(responseMessage); // extend conversation with assistant's reply
    for (const toolCall of toolCalls) {
      const functionName = toolCall.function.name;
      const functionToCall = availableFunctions[functionName];
      const functionArgs = JSON.parse(toolCall.function.arguments);
      console.log('Arguments:', toolCall.function.arguments, 'name:', functionName); // Add this line to debug
      const functionResponse = functionToCall(
        functionArgs.bookingTime,
        functionArgs.numGuests
      );
      messages.push({
        tool_call_id: toolCall.id,
        role: "tool",
        name: functionName,
        content: functionResponse,
      }); // extend conversation with function response
    }
    const secondResponse = await openai.chat.completions.create({
      model: "gpt-3.5-turbo-1106",
      messages: messages,
    }); // get a new response from the model where it can see the function response
    return secondResponse.choices;
  }
}


runConversation().then(console.log).catch(console.error);

I seriously need someone’s help. I have been struggling with this for a while. If someone could fix my code (I know how to fix the error could not parse JSON body - but I have no idea how to fix the underlying issue which is how do I use assistants, threadIDs, and function calling all at the same time). If you could fix my code, ideally by fixing my NodeJS code and pasting the fixed code in full as a response, that would be GREATLY appreciated!! Thanks