Same problem here! I get exactly the same error even though I'm using the same code as in the documentation.
Here is a snippet from my code:
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": prompt
            }
        ] + [
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{encode_image(image_path)}"
                }
            }
            for image_path in images
        ]
    }
]
chat_completion = self.openai.chat.completions.create(
model=self.config["model_name"], # Choose the engine according to your OpenAI plan
messages=messages,
max_tokens=min(n_predict, 128), # Adjust the desired length of the generated response
n=1, # Specify the number of responses you want
temperature=gpt_params["temperature"], # Adjust the temperature for more or less randomness in the output
stream=True
)
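For context, here is roughly what I have around that call (a minimal sketch, not from the docs: encode_image is my own helper, and since I pass stream=True I assemble the reply from the streamed chunks):

import base64

def encode_image(image_path):
    # Read the image file and return its contents base64-encoded as a string
    with open(image_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

# With stream=True the call returns an iterator of chunks rather than a single
# message, so the text has to be assembled from the deltas:
response_text = ""
for chunk in chat_completion:
    delta = chunk.choices[0].delta.content
    if delta is not None:
        response_text += delta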