Here is the Python code I used to train it:
import os

import openai
import PyPDF2

# NOTE(review): despite the stated intent, this script does NOT train or
# fine-tune a model. openai.ChatCompletion.create() only sends a one-off chat
# request; real fine-tuning uses the OpenAI fine-tuning API with a prepared
# JSONL training file. The comments below document what the code actually does.

# Read the PDF document and concatenate the extracted text of every page.
with open("mydoc.pdf", "rb") as file:
    pdf_reader = PyPDF2.PdfReader(file)
    document_text = "".join(page.extract_text() for page in pdf_reader.pages)

# Set up the OpenAI API credentials. Never hard-code the key in source code;
# read it from the environment instead.
openai.api_key = os.environ["OPENAI_API_KEY"]

# Split the document into chunks. NOTE: this slices by *characters*, not
# tokens, so a 4096-character chunk is not guaranteed to fit the model's
# 4096-*token* context limit (though characters >= tokens, the prompt also
# includes the system message).
max_token_length = 4096
chunks = [
    document_text[i:i + max_token_length]
    for i in range(0, len(document_text), max_token_length)
]

# Send each chunk to the chat completions endpoint.
model_id = None
for chunk in chunks:
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": chunk},
    ]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    # BUG in the original approach: response["id"] is a chat *completion* id
    # ("chatcmpl-..."), not a trained-model id — it cannot be passed as
    # `model=` in later API calls. Also, only the last chunk's id survives
    # this loop.
    model_id = response["id"]

print("Trained model ID:", model_id)
It outputs the model_id, which I used in the following Python code:
import os

import openai
import gradio as gr

# Read the API key from the environment rather than hard-coding it in source.
openai.api_key = os.environ["OPENAI_API_KEY"]

# Conversation history, shared across calls. It grows without bound; a real
# deployment should truncate old turns to stay within the model context limit.
messages = [
    {"role": "system", "content": "You are a helpful and kind AI Assistant."},
]


def chatbot(user_input):
    """Append the user's message, query the model, and return its reply.

    Returns None (blank output) when the input box is empty.
    """
    if user_input:
        messages.append({"role": "user", "content": user_input})
        chat = openai.ChatCompletion.create(
            # BUG fixed: the original passed a chat-completion id
            # ("chatcmpl-...") as the model name. Completion ids are not
            # model ids and the API rejects them; use a real model name.
            model="gpt-3.5-turbo",
            messages=messages,
        )
        reply = chat.choices[0].message.content
        messages.append({"role": "assistant", "content": reply})
        return reply


# gr.inputs.Textbox / gr.outputs.Textbox were removed in Gradio 3.x;
# components are now constructed directly (gr.Textbox).
inputs = gr.Textbox(lines=7, label="Chat with AI")
outputs = gr.Textbox(label="Reply")
gr.Interface(
    fn=chatbot,
    inputs=inputs,
    outputs=outputs,
    title="AI Chatbot",
    description="Ask anything you want",
    # theme="compact" was removed in Gradio 3.x; the default theme is used.
).launch(share=True)