I'm using v2 Assistants, but get "Error 400: Requested model 'gpt-4o' cannot be used with the Assistants API in v1"

My v2 assistants return an error code 400: Requested model ‘gpt-4o’ cannot be used with the Assistants API in v1. I’ve looked through the migration guide and everything is in order.

  1. I’m using the latest OpenAI NodeJS API (version ^4.47.3) in a Firebase cloud function.
  2. All assistants are using v2.
  3. All of the above code works if I switch the Assistant to a previous model (such as ‘gpt-4-turbo’). It will return the expected result without any issues.

Here is my code in the cloud function ‘openAIAssistant.js’:

import OpenAI from 'openai';

// Build an OpenAI client configured for the Assistants v2 beta.
// `timeout` is given in seconds; the SDK expects milliseconds.
// NOTE(review): openai-node >= 4.36 already sends 'OpenAI-Beta: assistants=v2'
// for beta.threads calls; the explicit defaultHeaders override is harmless but
// redundant — if the deployed function still reports a v1 error, verify the
// deployed bundle actually contains SDK ^4.47.3 (stale deploys are a common cause).
const openAI = (timeout) => {
  return new OpenAI({
    organization: process.env.OPENAI_ORGANIZATION_KEY,
    project: process.env.OPENAI_PROJECT_KEY,
    apiKey: process.env.OPENAI_API_KEY,
    timeout: timeout * 1000,
    defaultHeaders: { 'OpenAI-Beta': 'assistants=v2' },
  });
};

// Create a new, empty Assistants thread.
const createThread = async (openai) => {
  return await openai.beta.threads.create();
};
// Append a message to an existing thread.
// NOTE(review): despite the plural name, `data.messages` is a single message
// object ({ role, content }) as sent by the client — verify against callers.
const createMessage = async (openai, data) => {
  return await openai.beta.threads.messages.create(data.threadId, data.messages);
};
// Start a run of the given assistant on the given thread.
// Fix: the client sends `data.instructions`, but it was silently dropped here.
// Forward it only when non-empty so existing calls (which pass '') keep the
// assistant's default instructions, exactly as before.
const runAssistant = async (openai, data) => {
  const run = await openai.beta.threads.runs.create(data.threadId, {
    assistant_id: data.assistantId,
    ...(data.instructions ? { instructions: data.instructions } : {}),
  });
  console.log(`Created new run`, run);
  return run;
};
// Retrieve the current state of a run (used by the client for polling).
const runStatus = async (openai, data) => {
  return await openai.beta.threads.runs.retrieve(data.threadId, data.runId);
};

exports.handler = async function (data, context) {

  const openai = openAI(120);

  switch (data.action) {
    case 'createThread':
      return await createThread(openai);
    case 'createMessage':
      return await createMessage(openai, data);
    case 'runAssistant':
      return await runAssistant(openai, data);
    case 'runStatus':
      return await runStatus(openai, data);
      throw new Error('Action not found');

And in ‘index.js’:

const openAIAssistant = require('./https/openAIAssistant');

exports.openAIAssistant = functions.https.onCall(async (data, context) => {
  return await openAIAssistant.handler(data, context);

Now for the client-side code:

    //Utility method used internally.
    async _callAssistant(data, action) {
      const openAI = firebase.functions().httpsCallable('openAIAssistant');
      data.action = action;
      return await openAI(data);

    async createThread() {
      const data = {};
      return await this._callAssistant(data, 'createThread');
    async createMessage(threadId, messages) {
      const data = { threadId, messages };
      return await this._callAssistant(data, 'createMessage');
    async runAssistant(assistantId, threadId, instructions) {
      const data = { threadId, assistantId, instructions };
      return await this._callAssistant(data, 'runAssistant');
    async runStatus(threadId, runId) {
      const data = { threadId, runId };
      return await this._callAssistant(data, 'runStatus');

//Called by the client somewhere.
async generate() {
    const message = { role: 'user', content: 'Example text' };
    const thread = await this.createThread();

    await this.createMessage(thread.data.id, message);
    const response = await this.runAssistant(this.assistant.data.id, thread.data.id, '');

//This function calls 'runStatus' periodically to check if the result is complete and then do something with it (though it's never reached with gpt-4o due to the server-side error.)
    await this.checkStatus(thread, response);

Screenshot: the configuration of one of the assistants.
Screenshot: the Cloud Function log showing the 400 error.