"You must provide a model parameter" error

import requests
import json

class MondayClient:
    def __init__(self, api_key):
        self.api_url = "https://api.monday.com/v2"
        self.headers = {"Authorization": api_key}

    def make_request(self, query):
        data = {'query': query}  # Don't use json.dumps here
        response = requests.post(self.api_url, headers=self.headers, json=data)  # Use json parameter instead of data
        return response.json()

    def create_item(self, board_id, item_name):
        query = f'''
        mutation {{
          create_item (board_id: {board_id}, item_name: "{item_name}") {{
            id
          }}
        }}
        '''
        return self.make_request(query)

    def change_item_name(self, board_id, item_id, new_name):
        query = f'''
        mutation {{
          change_column_value (board_id: {board_id}, item_id: {item_id}, column_id: "name", value: {{"text": "{new_name}"}}) {{
            id
          }}
        }}
        '''
        return self.make_request(query)

    # Add more methods as needed

class GPTInterpreter:
    def __init__(self, openai_api_key):
        self.api_url = "https://api.openai.com/v1/engines/davinci/completions"
        self.headers = {
            'Authorization': f'Bearer {openai_api_key}',
            'Content-Type': 'application/json'
        }

    def interpret_request(self, user_request):
        data = json.dumps({
            'prompt': user_request,
            'max_tokens': 100
        })
        response = requests.post(self.api_url, headers=self.headers, data=data)
        return response.json()

# Usage
openai_api_key = 'X'
monday_api_key = 'Y'

gpt = GPTInterpreter(openai_api_key)
monday_client = MondayClient(monday_api_key)

user_request = "Add a new item called 'Task1' to board 5773096249"

# GPT interprets the request
interpreted_request = gpt.interpret_request(user_request)
# Custom logic needed here to parse the GPT's response and decide the action on Monday.com

# Example action based on interpreted request (this is a placeholder, real implementation depends on GPT's response)
board_id = 5773096249  # Extract this from GPT's response
item_name = "TaskABCDE"   # Extract this from GPT's response
response = monday_client.create_item(board_id, item_name)
print(response)

Inside GPTInterpreter, when I change

self.api_url = "https://api.openai.com/v1/engines/davinci/completions"

to a more updated one e.g.

self.api_url = "https://api.openai.com/v1/chat/completions"

print(interpreted_request) goes from working fine to giving the error “You must provide a model parameter”.

Why is this?

Previously the model name was in the URL. Now, you need to send the model as a parameter…

Why does the first one work then? That’s also model name in URL?

The issue you encounter is in using chat models properly.

Your snippet:

        data = json.dumps({
            'prompt': user_request,
            'max_tokens': 100
        })

No “model” was specified, such as 'model': 'gpt-3.5-turbo',

Additionally, the chat completion method does not accept a “prompt”, instead it accepts a list of json messages, with keys of “role” and “content”, with accepted roles restricted.

https://platform.openai.com/docs/api-reference/chat/create

You might save yourself time by using the methods of the openai python library, also demonstrated at the link above.

Understanding all the API parameters will help you make useful API calls.


Above all, AI models don’t have recent knowledge of API changes to write OpenAI code for you.

You can also see AI-generated lazy placeholders because it doesn’t know what to do with the output.