'''example completion with openai > 1.1'''
from openai import OpenAI
client = OpenAI()
prompt = "True or false: a banana is smaller than a lemon.\n\n"
response = client.completions.create(
    prompt=prompt,
    model="gpt-3.5-turbo-instruct",
    top_p=0.5,
    max_tokens=50,
    stream=True,
)
# stream=True returns an iterator of chunks; print each piece as it arrives
for part in response:
    print(part.choices[0].text or "", end="")
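With stream=True, the completion arrives as a series of small chunks rather than one finished response, which is why the loop prints text as it is generated. If you also want the whole answer as a single string afterwards, a minimal sketch (reusing the same client and prompt as above; the collected list is just an illustrative name) is to accumulate the chunks while printing:

collected = []  # gather the streamed pieces so the full text is available later
for part in client.completions.create(
    prompt=prompt,
    model="gpt-3.5-turbo-instruct",
    top_p=0.5,
    max_tokens=50,
    stream=True,
):
    text = part.choices[0].text or ""
    print(text, end="")
    collected.append(text)
full_reply = "".join(collected)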
That’s great to hear! Mind you, the code above is for gpt-3.5-turbo-instruct, a special “completion” model that operates a bit differently from the “chat” interface you may be expecting.
Here’s how to get a response from the gpt-3.5-turbo chat AI with the latest Python openai library:
from openai import OpenAI
client = OpenAI()
system = [{"role": "system", "content": "You are HappyBot."}]
chat_history = [] # past user and assistant turns for AI memory
user = [{"role": "user", "content": "Are you fully operational?"}]
chat_completion = client.chat.completions.create(
    messages=system + chat_history + user,
    model="gpt-3.5-turbo",
    max_tokens=25,
    top_p=0.9,
)
print(chat_completion.choices[0].message.content)
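The chat_history list is where any AI “memory” comes from: the API itself is stateless, so past turns have to be sent back with every request. A minimal sketch of maintaining that history after each exchange, assuming the variables above (the trim to the last 20 messages is just an illustrative limit, not an API requirement):

reply = chat_completion.choices[0].message.content
chat_history += user  # add the user turn that was just sent
chat_history.append({"role": "assistant", "content": reply})  # and the AI's answer
chat_history = chat_history[-20:]  # keep the prompt from growing without bound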