Hello, I need help with changing the model because when I run the website it give me the error openai.error.InvalidRequestError: The model text-davinci-003
has been deprecated, learn more here: https://platform.openai.com/docs/deprecations
I tried implementing the gpt-3.5-turbo-instruct but it doesn’t work, my code is quite long but I’ll show the snippet where I assume the error is coming from and my imports:
from flask import Flask, render_template, request, redirect, url_for, session, Response
import os
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback
import io
import boto3
import botocore
import uuid
from werkzeug.security import check_password_hash, generate_password_hash
@app.route(‘/continuous_questioning’, methods=[‘GET’, ‘POST’])
def continuous_questioning():
# Retrieve the current user ID using session cookie
user_id = session.get(‘user_id’, None)
if user_id is None:
return redirect(url_for(‘index’))
# Retrieve user-specific data from session
pdf_key = session.get('pdf_key', None)
# Retrieve user-specific responses from DynamoDB
responses = get_user_responses(user_id)
# Initialize a flag to indicate if the answer is processing
answer_processing = False
if request.method == 'POST':
query = request.form['query']
if query and pdf_key:
# Set the flag to indicate that the answer is processing
answer_processing = True
# Download PDF file from S3
pdf_content = download_file_from_s3(pdf_key)
if pdf_content:
# Read the PDF file
with io.BytesIO(pdf_content) as pdf_file:
pdf_reader = PdfReader(pdf_file)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text=text)
# Replace VectorStore with the appropriate vector store or embedding mechanism
# based on your specific use case
embeddings = OpenAIEmbeddings(model="gpt-3.5-turbo")
VectorStore = FAISS.from_texts(chunks, embedding=embeddings)
docs = VectorStore.similarity_search(query=query, k=3)
llm = OpenAI()
chain = load_qa_chain(llm=llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=query)
# Append the question and its answer as a pair to user-specific responses
responses.append({'question': query, 'answer': response})
# Update user-specific responses in DynamoDB
set_user_responses(user_id, responses)
return render_template('continuous_questioning.html', responses=responses, answer_processing=answer_processing)
return render_template('continuous_questioning.html', responses=responses, answer_processing=answer_processing)
Please update my code with the new model because this project is for my thesis that has to be submitted in 3 days.