Apologies, I am a non programmer. I made my chatbot with chatgpt.
When I use the playground, I have the full 450k token limit. But in the chatbot I built I often run into this sort of error
“Error: Error code: 429 - {‘error’: {‘message’: ‘Request too large for gpt-4 in organization org-IE9BHcv18o5uhhjWBJVgurio on tokens per min (TPM): Limit 40000, Requested 191212. The input or output tokens must be reduced in order to run successfully. Visit https://platform.openai.com/account/rate-limits to learn more.’, ‘type’: ‘tokens’, ‘param’: None, ‘code’: ‘rate_limit_exceeded’}}”
Or another one saying the context window is only about 8100 tokens. The chatbot also forgets the thread of the conversation as quickly as the regular free chatgpt does.
Here is my python code, what am I doing wrong?
from flask import Flask, request, jsonify, render_template
from openai import OpenAI
client = OpenAI(api_key=‘sk-proj-yaddayaddayaddaH’) # Correct import
import json
import os
from datetime import datetime
app = Flask(name)
Directory for saving conversation histories
DATA_DIR = ‘data’
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
def save_conversation(project, conversation):
project_dir = os.path.join(DATA_DIR, project)
if not os.path.exists(project_dir):
os.makedirs(project_dir)
filename = os.path.join(project_dir, f"conversation_{datetime.now().isoformat()}.json")
with open(filename, 'w') as f:
json.dump(conversation, f)
def load_conversation_history(project):
project_dir = os.path.join(DATA_DIR, project)
conversations =
if os.path.exists(project_dir):
for filename in sorted(os.listdir(project_dir)):
if filename.endswith(‘.json’):
filepath = os.path.join(project_dir, filename)
with open(filepath, ‘r’) as f:
conversation = json.load(f)
conversations.append(conversation)
return conversations
@app.route(‘/’)
def index():
return render_template(‘index.html’)
@app.route(‘/chat’, methods=[‘POST’])
def chat():
data = request.json
prompt = data[‘prompt’]
project = data[‘project’]
# Load conversation history
conversation_history = load_conversation_history(project)
# Combine prompt with conversation history
messages = []
for convo in conversation_history:
messages.append({"role": "user", "content": convo['prompt']})
messages.append({"role": "assistant", "content": convo['response']})
messages.append({"role": "user", "content": prompt})
try:
response = client.chat.completions.create(model="gpt-4", # Use the appropriate model name as needed
messages=messages)
conversation = {
"prompt": prompt,
"response": response.choices[0].message.content,
"tokens_used": response.usage.total_tokens
}
save_conversation(project, conversation)
return jsonify(conversation)
except Exception as e:
return jsonify({"error": str(e)}), 500
@app.route(‘/projects’)
def get_projects():
projects = [d for d in os.listdir(DATA_DIR) if os.path.isdir(os.path.join(DATA_DIR, d))]
return jsonify(projects)
@app.route(‘/conversations/’)
def get_conversations(project):
project_dir = os.path.join(DATA_DIR, project)
conversations =
if os.path.exists(project_dir):
conversations = [f for f in os.listdir(project_dir) if f.endswith(‘.json’)]
return jsonify(conversations)
@app.route(‘/conversation//’)
def get_conversation(project, filename):
filepath = os.path.join(DATA_DIR, project, filename)
conversation = {}
if os.path.exists(filepath):
with open(filepath, ‘r’) as f:
conversation = json.load(f)
return jsonify(conversation)
if name == ‘main’:
app.run(debug=True)
I also use this js:
document.addEventListener(‘DOMContentLoaded’, () => {
const projectList = document.getElementById(‘project-list’);
const conversationHistory = document.getElementById(‘conversation-history’);
const projectNameInput = document.getElementById(‘project-name’); // New input field
const promptArea = document.getElementById(‘prompt’);
const sendButton = document.getElementById(‘send-btn’);
// Load list of projects
fetch('/projects')
.then(response => response.json())
.then(projects => {
projects.forEach(project => {
const li = document.createElement('li');
li.innerText = project;
li.onclick = () => loadConversations(project);
projectList.appendChild(li);
});
});
// Load conversations for a project
function loadConversations(project) {
fetch(`/conversations/${project}`)
.then(response => response.json())
.then(conversations => {
conversationHistory.innerHTML = '';
conversations.forEach(conversation => {
loadConversation(project, conversation);
});
});
}
// Load a specific conversation and append to conversation history
function loadConversation(project, filename) {
fetch(`/conversation/${project}/${filename}`)
.then(response => response.json())
.then(data => {
const formattedResponse = data.response.replace(/\n/g, '<br>');
let div = document.createElement('div');
div.innerHTML = `<b>Prompt:</b> ${data.prompt}<br><b>Response:</b> ${formattedResponse}<br><b>Tokens used:</b> ${data.tokens_used}`;
conversationHistory.appendChild(div);
});
}
// Send a message to the server
sendButton.onclick = () => {
const prompt = promptArea.value;
const project = projectNameInput.value.trim(); // Get the project name from the input field
if (prompt.trim() === '') {
alert('Prompt cannot be empty');
return;
}
if (project === '') {
alert('Project name cannot be empty');
return;
}
fetch('/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ prompt, project })
})
.then(response => response.text()) // fetch the response as text
.then(text => {
console.log('Raw response:', text); // log the raw response
const data = JSON.parse(text); // parse the response as JSON
if (data.error) {
alert(`Error: ${data.error}`);
} else {
const formattedResponse = data.response.replace(/\n/g, '<br>');
conversationHistory.innerHTML += `<div><b>User:</b> ${prompt}</div><div><b>GPT:</b> ${formattedResponse}</div><div><b>Tokens used:</b> ${data.tokens_used}</div>`;
promptArea.value = '';
}
})
.catch(error => {
console.error('Error:', error);
});
};
});