Getting error "You must provide a model parameter"

I am trying to use chatgpt 3.5 turbo model for chat completion in my nodejs code. However, on trying to hit the API using postman, I get an error saying You must provide a model parameter. What am I missing?

const express = require('express');
const {Configuration, OpenAIApi} = require('openai');

const app = express();

const configuration = new Configuration({
  apiKey: 'MY_API_KEY',
});
const openai = new OpenAIApi(configuration);

app.use(express.json());

app.post('/chat', async (req, res) => {
  try {
    const response = openai.createChatCompletion({
      model: 'gpt-3.5-turbo',
      messages: [
        {role: 'system', content: 'You are a helpful assistant.'},
        {role: 'user', content: req.body.message},
      ],
      stream: true,
    });
    console.log('res:', response.data.choices[0].message);
    const responseData = [];

    for await (const chunk of response) {
      const parsed = parseChunk(chunk);
      responseData.push(parsed.choices[0].message.content);
    }

    res.json({messages: responseData});
  } catch (error) {
    console.error('Error:', error);

    res.status(500).json({error: 'An error occurred'});
  }
});

const PORT = process.env.PORT || 3000;

app.listen(PORT, () => {
  console.log(`Server is running on port ${PORT}`);
});

The model seems correct. However I think you might be missing await in the following line:
const response = openai.createChatCompletion({
so it becomes const response = await openai.createChatCompletion({

You might also want to test it without the stream. I’m not familiar enough with this to get it running with stream on, but it’s a lot easier to debug problems like this with it off. It seems like response.data.choices[0].message might be a different structure to the actual streamed response.

1 Like