How can I save each conversation with my own AI chatbot (trained only on my data to answer my questions)?

Hello,
We are an ERP system support software company that helps our users with the daily tasks of running their business.
We get a lot of inquiries from our customers, and we want to use OpenAI to help us answer them.
We have a SQL database of all the questions and inquiries from customers, and we want to train ChatGPT on it so that it answers them correctly and helps our customer support team.
With Python code, we take the data from our SQL database (MS SQL), turn it into a PDF file (or load it directly), and upload it to a Pinecone/Milvus vector database.
From there, again with Python, we use Streamlit and LangChain (with ConversationBufferWindowMemory) to build a chatbot that pulls information from Pinecone/Milvus and answers the questions through OpenAI.
But how can we save each conversation with this chatbot for future reference, using Python code? And how can we save the whole conversation when the chatbot cannot answer the client?
Thanks a lot!

1 Like

Hi,

You would need to find out where in the Streamlit code the API calls are being made and then write those calls and the replies to a file. It's a trivial task if you are making the API calls yourself, but I am not familiar with Streamlit's code.
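
As a rough illustration in plain Python (not tied to Streamlit or LangChain; log_turn, conversations.jsonl and qa_chain are placeholder names I made up, and you could just as well write to your MS SQL database instead of a file), the idea is simply to append every question/answer pair, plus a flag for the turns the bot couldn't handle:

import json
from datetime import datetime, timezone

LOG_FILE = "conversations.jsonl"  # one JSON object per line

def log_turn(session_id, question, answer, answered):
    """Append one question/answer pair to the log file."""
    record = {
        "session_id": session_id,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "question": question,
        "answer": answer,
        "answered": answered,  # False when the bot could not help, for later review
    }
    with open(LOG_FILE, "a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")

# Example usage right after your chain returns an answer:
# answer = qa_chain.run(question)                  # your existing LangChain call
# answered = "I don't know" not in answer          # or whatever rule you prefer
# log_turn(session_id, question, answer, answered)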

1 Like

Try this:

This Node.js script saves every request and completion to a JSON file. It then sends this data as conversation history with each new prompt. The script also records the number of tokens used. However, the code to calculate how much history can fit under the token limit is still in development.

// Import required modules
const fs = require('fs');
const axios = require('axios');

// Your OpenAI API key
const apiKey = 'your-openai-api-key';

// Function to interact with OpenAI API
async function interactWithAI(userPrompt) {
    try {
        // Define the message data structure
        let messageData = { 'messages': [] };

        // If requests.json exists, read and parse the file
        if (fs.existsSync('requests.json')) {
            let raw = fs.readFileSync('requests.json');
            messageData = JSON.parse(raw);
        }

        // Format the conversation history and the new user request
        let systemMessage = "Conversation history:\n" + messageData['messages'].map(m => `${m.role} [${m.timestamp}]: ${m.content}`).join("\n");
        let userMessage = "New request: " + userPrompt;

        // Make a POST request to OpenAI's chat API
        let response = await axios({
            method: 'post',
            url: 'https://api.openai.com/v1/chat/completions',
            headers: { 'Authorization': `Bearer ${apiKey}`, 'Content-Type': 'application/json' },
            data: { 'model': 'gpt-4', 'messages': [ { "role": "system", "content": systemMessage }, { "role": "user", "content": userMessage } ] }
        });

        // Log the AI's response
        console.log(response.data['choices'][0]['message']['content']);

        // Get the current timestamp
        let timestamp = new Date().toISOString();

        // Add the new user request and the AI's response to the message history
        messageData['messages'].push({ 
            "role": "user", 
            "content": userPrompt, 
            "timestamp": timestamp, 
            "tokens": response.data['usage']['prompt_tokens'] // Include prompt tokens
        });

        messageData['messages'].push({ 
            "role": "assistant", 
            "content": response.data['choices'][0]['message']['content'], 
            "timestamp": timestamp, 
            "tokens": response.data['usage']['completion_tokens'] // Include completion tokens
        });

        // Write the updated message history to requests.json
        fs.writeFileSync('requests.json', JSON.stringify(messageData, null, 2));

        // Return the AI's response
        return response.data['choices'][0]['message']['content'];
    } catch (e) {
        // If an error occurred, log it to the console and return an error message
        console.error('An error occurred:', e);
        return 'An error occurred while interacting with the OpenAI API. Please check the console for more details.';
    }
}

2 Likes

UPDATE:

In this updated code, the function calculateTokens() estimates a text's token count (roughly one token per four characters), and the number of tokens available for the conversation history is then calculated by subtracting the user prompt tokens and the maximum response tokens (1000) from the total limit of 8192.

After that, buildMessageList() iterates over the stored messages in reverse order (most recent first) and adds them to newMessageList until adding another message would exceed the availableTokens limit.

The newMessageList (which is the adjusted conversation history) and the new user request are then sent to the OpenAI API.

In addition, I refactored the code.

Here you go…

// Import required modules
const fs = require('fs').promises;
const axios = require('axios');

// Your OpenAI API key
const API_KEY = process.env.OPENAI_API_KEY || 'your-openai-api-key';
const API_URL = 'https://api.openai.com/v1/chat/completions';
const MAX_TOKENS = 8192;
const MAX_RESPONSE_TOKENS = 1000;

// Function to estimate tokens (roughly 4 characters per token)
function calculateTokens(text) {
    return Math.ceil(text.length / 4);
}

// Helper function to build message list
function buildMessageList(messageData, availableTokens) {
    let newMessageList = [];
    let historyTokens = 0;

    // Iterate over the messages in reverse order (most recent first)
    for (let i = messageData['messages'].length - 1; i >= 0; i--) {
        let messageTokens = messageData['messages'][i]['tokens'];
        if (historyTokens + messageTokens > availableTokens) {
            break;
        }
        // Only pass role and content to the API; the stored timestamp/tokens
        // fields are not valid properties for chat messages
        newMessageList.unshift({
            "role": messageData['messages'][i]['role'],
            "content": messageData['messages'][i]['content']
        });
        historyTokens += messageTokens;
    }

    return newMessageList;
}

// Function to interact with OpenAI API
async function interactWithAI(userPrompt) {
    try {
        // Define the message data structure
        let messageData = { 'messages': [] };

        // If requests.json exists, read and parse the file
        try {
            const rawData = await fs.readFile('requests.json');
            messageData = JSON.parse(rawData);
        } catch (err) {
            console.error('An error occurred while reading requests.json:', err);
        }

        // Calculate tokens for the user prompt
        let userPromptTokens = calculateTokens(userPrompt);

        // Calculate available tokens for the history
        let availableTokens = MAX_TOKENS - userPromptTokens - MAX_RESPONSE_TOKENS;

        // Create a new message list for history within token limit
        let newMessageList = buildMessageList(messageData, availableTokens);

        // Make a POST request to OpenAI's chat API
        let response = await axios({
            method: 'post',
            url: API_URL,
            headers: { 'Authorization': `Bearer ${API_KEY}`, 'Content-Type': 'application/json' },
            data: { 'model': 'gpt-4', 'messages': newMessageList.concat({ "role": "user", "content": userPrompt }) }
        });

        // Log the AI's response
        console.log(response.data['choices'][0]['message']['content']);

        // Get the current timestamp
        let timestamp = new Date().toISOString();

        // Add the new user request and the AI's response to the message history
        messageData['messages'].push({ 
            "role": "user", 
            "content": userPrompt, 
            "timestamp": timestamp, 
            "tokens": response.data['usage']['prompt_tokens'] // Include prompt tokens
        });

        messageData['messages'].push({ 
            "role": "assistant", 
            "content": response.data['choices'][0]['message']['content'], 
            "timestamp": timestamp, 
            "tokens": response.data['usage']['completion_tokens'] // Include completion tokens
        });

        // Write the updated message history to requests.json
        try {
            await fs.writeFile('requests.json', JSON.stringify(messageData, null, 2));
        } catch (err) {
            console.error('An error occurred while writing to requests.json:', err);
        }

        // Return the AI's response
        return response.data['choices'][0]['message']['content'];
    } catch (e) {
        // If an error occurred, log it to the console and return an error message
        console.error('An error occurred while interacting with the OpenAI API:', e);
        return 'An error occurred while interacting with the OpenAI API. Please check the console for more details.';
    }
}

1 Like

Have you considered using GitHub - ceifa/tiktoken-node: OpenAI's tiktoken but with node bindings to do the token calcs, rather than dividing .length by 4? It's extra complexity, but I think the added accuracy might be worth it.

1 Like

Take a look at this GitHub repo:

The developer wrote about it here:

It uses Streamlit as well, so digging through the code may give you some insights.

2 Likes

I was only aware of the Python version, so I hacked this together. Thanks for the tip. I'm going to try it out.

2 Likes

Thank you so much for your help and time!

1 Like

UPDATED:
Replaced the JavaScript token calculator with ceifa/tiktoken-node, as suggested by @Foxalabs.

// Import required modules
const fs = require('fs').promises;
const axios = require('axios');
const tiktoken = require('tiktoken-node');

// Your OpenAI API key
const API_KEY = process.env.OPENAI_API_KEY || 'your-openai-api-key';
const API_URL = 'https://api.openai.com/v1/chat/completions';
const MAX_TOKENS = 8192;
const MAX_RESPONSE_TOKENS = 1000;

// Get the encoding for the 'gpt-4' model
let enc = tiktoken.encodingForModel("gpt-4");

// Function to calculate tokens using tiktoken
function calculateTokens(text) {
    return enc.encode(text).length;
}

// Helper function to build message list
function buildMessageList(messageData, availableTokens) {
    let newMessageList = [];
    let historyTokens = 0;

    // Iterate over the messages in reverse order (most recent first)
    for (let i = messageData['messages'].length - 1; i >= 0; i--) {
        let messageTokens = messageData['messages'][i]['tokens'];
        if (historyTokens + messageTokens > availableTokens) {
            break;
        }
        // Only pass role and content to the API; the stored timestamp/tokens
        // fields are not valid properties for chat messages
        newMessageList.unshift({
            "role": messageData['messages'][i]['role'],
            "content": messageData['messages'][i]['content']
        });
        historyTokens += messageTokens;
    }

    return newMessageList;
}

// Function to interact with OpenAI API
async function interactWithAI(userPrompt) {
    try {
        // Define the message data structure
        let messageData = { 'messages': [] };

        // If requests.json exists, read and parse the file
        try {
            const rawData = await fs.readFile('requests.json');
            messageData = JSON.parse(rawData);
        } catch (err) {
            console.error('An error occurred while reading requests.json:', err);
        }

        // Calculate tokens for the user prompt
        let userPromptTokens = calculateTokens(userPrompt);

        // Calculate available tokens for the history
        let availableTokens = MAX_TOKENS - userPromptTokens - MAX_RESPONSE_TOKENS;

        // Create a new message list for history within token limit
        let newMessageList = buildMessageList(messageData, availableTokens);

        // Make a POST request to OpenAI's chat API
        let response = await axios({
            method: 'post',
            url: API_URL,
            headers: { 'Authorization': `Bearer ${API_KEY}`, 'Content-Type': 'application/json' },
            data: { 'model': 'gpt-4', 'messages': newMessageList.concat({ "role": "user", "content": userPrompt }) }
        });

        // Log the AI's response
        console.log(response.data['choices'][0]['message']['content']);

        // Get the current timestamp
        let timestamp = new Date().toISOString();

        // Add the new user request and the AI's response to the message history
        messageData['messages'].push({ 
            "role": "user", 
            "content": userPrompt, 
            "timestamp": timestamp, 
            "tokens": response.data['usage']['prompt_tokens'] // Include prompt tokens
        });

        messageData['messages'].push({ 
            "role": "assistant", 
            "content": response.data['choices'][0]['message']['content'], 
            "timestamp": timestamp, 
            "tokens": response.data['usage']['completion_tokens'] // Include completion tokens
        });

        // Write the updated message history to requests.json
        try {
            await fs.writeFile('requests.json', JSON.stringify(messageData, null, 2));
        } catch (err) {
            console.error('An error occurred while writing to requests.json:', err);
        }

        // Return the AI's response
        return response.data['choices'][0]['message']['content'];
    } catch (e) {
        // If an error occurred, log it to the console and return an error message
        console.error('An error occurred while interacting with the OpenAI API:', e);
        return 'An error occurred while interacting with the OpenAI API. Please check the console for more details.';
    }
}

1 Like