Resolving OpenAI API Model Deprecation Errors in Python

import openai

openai.api_key = "api"

def get_response(prompt):
    try:
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=prompt,
            max_tokens=100,
            n=1,
            stop=["In summary", "Example:", "Summary:", "Key points:"],
            temperature=0.7,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
            best_of=1
        )
        return response.choices[0].text.strip()
    except Exception as e:
        print("An error occurred:", e)
        return None

def main():
    print("Welcome to the AI Q&A System! Type 'exit' to quit.")

    while True:
        user_input = input("Ask your question: ")

        if user_input.lower() == "exit":
            print("Thank you for using the AI Q&A System. Goodbye!")
            break

        response = get_response(user_input)
        if response:
            print("AI's response:")
            print(response)
        else:
            print("Sorry, I couldn't generate a response. Please try again.")

if __name__ == "__main__":
    main()

Hi!

Unfortunately, you have to “upgrade” to

gpt-3.5-turbo-instruct

https://platform.openai.com/docs/api-reference/completions
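
A minimal sketch of the swap, assuming you stay on the pre-1.0 openai python library your snippet is written for: only the model changes, the Completions call and its parameters stay the same.

import openai

openai.api_key = "api"  # your key here

def get_response(prompt):
    try:
        response = openai.Completion.create(
            model="gpt-3.5-turbo-instruct",  # replaces the retired text-davinci-003
            prompt=prompt,
            max_tokens=100,
            temperature=0.7,
        )
        return response.choices[0].text.strip()
    except Exception as e:
        print("An error occurred:", e)
        return None

Same Completions endpoint, same prompt/stop/max_tokens handling; only the model name is new.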

We mourn davinci every day.

:coffin:


Hello @xvenom,
Unfortunately, the engine you are using is deprecated.
You will have to change the engine to something like
engine="gpt-3.5-turbo"

Try this.

And anxiously await the prophesied gpt-4-turbo-instruct or, dare I dream, the gpt-4-turbo-fiction model! Small smile.

That won’t work as he was using an instruct model, not a chat completion model. Appreciate you chiming in, though!
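
For anyone who does want to move that snippet to a chat model like gpt-3.5-turbo rather than gpt-3.5-turbo-instruct, it is more than a name change: you call the chat completions endpoint and send messages instead of a prompt. A rough sketch, still on the pre-1.0 library the original code uses:

import openai

def get_chat_response(prompt):
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],  # chat format, no bare prompt
            max_tokens=100,
            temperature=0.7,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print("An error occurred:", e)
        return None

Note that not every completions parameter carries over (best_of, for example, has no chat equivalent).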


Actually, there’s a lot that won’t work.

I’ve posted so many small examples of quick “chatbots” in Python, using both the old and the new library…

that the only way to entertain myself is to write a new “example” that uses the >1.10.0 Python library’s asynchronous client, has word wrapping, switches between stream=True and non-streaming handling with a single variable change, has a tool with a function, grabs the response headers and displays one for you, and also dumps all the chunks of tool_call as received…

import json
import asyncio
import openai
import re

class WrapPrint:
    """
    A Printer class that receives small chunks of text data and performs word wrap.
    The class object keeps track of the line length, and print_word adds text to the line,
    wrapping if a new word exceeds the length of the line.
    """
    
    def __init__(self, max_len=70, indent=2, breaks=[" ", "-", "\n"]):
        # Attributes:
        self.max_len = max_len  #The maximum length of a line before wrapping.
        self.indent = indent  #The number of spaces to indent new lines.
        self.breaks = breaks  #A list of characters that can be used as line breaks.
        self.line_length = -1  #The current length of the line being printed.

    def print_word(self, word):
        if not word:  # guard against nulls
            return
        if ((len(word) + self.line_length > self.max_len  # if word goes over max
                and (word and word[0] in self.breaks))    # and starts with break
                or self.line_length == -1):             # or is at init condition
            print("")              # go to new line
            self.line_length = 0   # reset the cursor position counter
            word = word.lstrip()   # and trim white spaces off start of word

        if self.line_length == 0:  # if at start of line...
            print(" " * self.indent, end="") # indent
            self.line_length = self.indent   # set the cursor to position

        print(word, end="")  # then finally print the word

        if word.endswith("\n"):  # Detect in-document line feed
            print(" " * self.indent, end="")  # and indent those also
            self.line_length = self.indent
        self.line_length += len(word)  # advance cursor position to end of printed word


    def print_text(self, text):

        words = re.findall(r'\S+|\s+', text)
        for word in words:
            self.print_word(word)
        # self.reset()  # Reset the line length after printing the text


    def reset(self):
        self.line_length = 0  # external call to reset cursor


async def get_result(g):
    printer = WrapPrint()
    reply = ""

    async for i in g:
        if i.startswith("data: "):
            data_str = i[len("data: "):].strip()
            if data_str == "[DONE]":
                break
            data_dict = json.loads(data_str)
            delta = data_dict['choices'][0]['delta']
            if 'content' not in delta or 'tool_calls' in delta:
                print(f"\n{data_dict}")  # tool chunks
            else:
                content = delta['content'] if 'content' in delta else ""
                printer.print_text(content)
                reply += content
    printer.reset()
    return reply
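
def mods(command):
    # NOTE: placeholder, not in the original post - main() below routes any
    # console input that starts with "//" here; swap in your own debug commands
    # (dump headers, tweak temperature, clear chat history, ...).
    print(f"(no handler for command: {command!r})")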

async def main():
    tool_list = [
      {
          "type": "function",
          "function": {
              "name":  "fortune",
              "description": "Prints fortune of the day direct to UI\n- prefer short length",
              "parameters": {
                "type": "object",
                "properties": {
                    "count": {
                        "type": "number",
                        "description": "Number of fortunes, default 1 if unspecified",
                    },
                    "length": {
                        "type": "string",
                        "enum": ["short", "long"],
                    },
                },
                "required": ["length"]
            },
          }
      }
    ]
    system = [{"role": "system", "content":
    """You are jBot, based on gpt-3.5-turbo, released November 2023.
    Knowledge cutoff: January 2022"""}]
    user = [{"role": "user", "content":
    "Introduce yourself. Mention fortune tool, which emits tool_call to API."
    }]

    model       = "gpt-3.5-turbo-0125"
    temperature = 0.5  # 0.0-2.0
    top_p       = 0.5  # 0.0-1.0
    max_tokens  = 999  # response reservation from context length
    stream_var   = True
    client = openai.AsyncOpenAI()

    chat        = []  # contains past user/AI messages
    turns       = 6   # chat history passed

    while user[0]['content'] not in ["", "exit"]:
        apiresponse = None
        headers = None
        
        try:
            apiresponse = await client.chat.completions.with_raw_response.create(
                messages    = system + chat[-turns*2:] + user,  # concatenate lists
                model       = model,
                temperature = temperature,
                max_tokens  = max_tokens, # maximum response length
                stop        = "",
                top_p       = top_p,
                presence_penalty = 0.0,  # penalties -2.0 - 2.0
                frequency_penalty = 0.01, # frequency = cumulative score
                n           = 1,
                stream      = stream_var,
                logit_bias  = {"100066": -1},  # example, '~\n\n' token
                user        = "site_user-id",
                tools       = tool_list
            )
        except Exception as e:
            print(f"An error occurred when calling the API: {e}")
            print(f"Try another prompt, or type 'exit'")
            prompt = input("\nPrompt: ")
            user = [{"role": "user", "content": prompt}]  # update `user` with new input
            continue

        # load headers to variables like "headers_x_ratelimit_remaining_tokens"
        if apiresponse.headers:
            globals()["r"] = apiresponse  # for console debugging of object parse
            headers_dict = apiresponse.headers.items().mapping.copy()
            for key, value in headers_dict.items():
                variable_name = f'headers_{key.replace("-", "_")}'
                globals()[variable_name] = value

        if stream_var == True:
            reply = ""
            response = apiresponse.parse()
            await response.response.aread()
            g = response.response.aiter_lines()
            reply = await get_result(g)
        else:
            reply = apiresponse.parse().choices[0].message.content
            printer = WrapPrint()
            printer.print_text(reply)
            
        print(f"\n(x_ratelimit_remaining_tokens: {headers_x_ratelimit_remaining_tokens})")

        # record message and response dictionaries to chat history list
        chat += user + [{"role": "assistant", "content": reply}]
        prompt = input("\nPrompt: ")
        if prompt and prompt[:2] == "//":
            mods(prompt[2:].strip())
            continue
        else:
            # take user input, placed directly into a new role dictionary and list
            user = [{"role": "user", "content": prompt}]

if __name__ == "__main__":
    asyncio.run(main())

Hopefully it’s still a bit of an “example”.
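
And if anyone wants to take those dumped tool_call chunks one step further: the streamed deltas arrive as fragments (an id and function name first, then the arguments string in pieces), keyed by index. A rough sketch of the reassembly step, assuming you collect the parsed delta dicts somewhere instead of only printing them the way get_result() does:

def collect_tool_calls(tool_deltas):
    # tool_deltas: the parsed 'delta' dicts that contained 'tool_calls',
    # in order of arrival (the same ones get_result() currently prints)
    calls = {}  # index -> accumulated call
    for delta in tool_deltas:
        for tc in delta.get("tool_calls", []):
            slot = calls.setdefault(tc["index"],
                                    {"id": None, "name": None, "arguments": ""})
            if tc.get("id"):
                slot["id"] = tc["id"]
            fn = tc.get("function") or {}
            if fn.get("name"):
                slot["name"] = fn["name"]
            slot["arguments"] += fn.get("arguments") or ""
    return calls

Each accumulated entry ends up looking like {'id': 'call_...', 'name': 'fortune', 'arguments': '{"length": "short"}'}; json.loads the arguments, run your fortune(), then append the assistant message with its tool_calls plus a {"role": "tool", "tool_call_id": ..., "content": ...} result to the chat and call the API again.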