We are currently working with the OpenAI GPT-3 model, setting it up with webhook code hosted on Repl.it. Our goal is to have a chatbot that is capable of maintaining context throughout the conversation. However, we have encountered an issue with the context lifespan of the chatbot and would appreciate some guidance on the matter.
We are currently working with the davinci model and have noticed that it does not seem to maintain context throughout the conversation. We would like to know whether the context lifespan (in the Dialogflow sense) can be set to a value greater than 0, and if so, how this can be done.
We have written the following webhook code to handle our chatbot, and we would like to know whether it needs to be adjusted in any way to better handle context lifespan:
// medium hook
// Express server exposing a Dialogflow fulfillment webhook that proxies
// fallback queries to the OpenAI completions API.
const express = require("express");
// NOTE(review): required for side effects only — the return value is unused;
// confirm this import is actually needed.
require("actions-on-google")
// require('dotenv').config();
const axios = require('axios');
const { WebhookClient } = require("dialogflow-fulfillment");
const app = express();
/**
 * Dialogflow fulfillment route. Maps incoming intents to handlers:
 * the welcome intent gets a greeting, and the fallback intent is
 * forwarded to the OpenAI completions API.
 */
app.post("/dialogflow", express.json(), (req, res) => {
  const agent = new WebhookClient({ request: req, response: res });

  const intentMap = new Map();
  intentMap.set("Default Welcome Intent", welcome);
  intentMap.set("Default Fallback Intent", queryGPT);
  agent.handleRequest(intentMap);

  /** Greets the user on the Default Welcome Intent. */
  function welcome(agent) {
    agent.add('Hi');
  }

  /**
   * Sends the user's query to OpenAI and replies with the completion text.
   *
   * NOTE(review): the `dialog` prompt is rebuilt from scratch on every
   * request, so no conversation history survives between turns — this is
   * why the bot does not maintain context. To fix that, persist the prior
   * `User:`/`AI:` lines (e.g. in a Dialogflow output context with a
   * lifespan > 0, or an external store keyed by session) and prepend them
   * to `dialog` before building the prompt.
   */
  async function queryGPT(agent) {
    // BUG FIX: `model` is not a valid axios config option, so the original
    // `model: "text-davinci-003"` inside axios.create() was silently
    // ignored and the request hit the deprecated /engines/davinci/...
    // path with the plain davinci engine. The model now goes in the
    // request body of the /completions endpoint (see below).
    const instance = axios.create({
      baseURL: 'https://api.openai.com/v1/',
      headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` },
    });

    // Seed prompt; conversation history would be inserted here (see NOTE).
    const dialog = [
      `The following is a conversation with an AI assistant. I can answer your questions and provide information.
AI: Hello, How can I help you today?`,
    ];

    const query = agent.query;
    console.log('querytext ', query);
    dialog.push(`User: ${query}`);
    dialog.push('AI:');

    const completionParams = {
      model: 'text-davinci-003', // moved here from axios.create, where it was ignored
      prompt: dialog.join('\n'),
      max_tokens: 60,
      temperature: 0.85,
      n: 1,
      stream: false,
      logprobs: null,
      echo: false,
      stop: '\n', // cut the completion at the end of the AI's single line
    };

    try {
      const result = await instance.post('/completions', completionParams);
      const botResponse = result.data.choices[0].text.trim();
      agent.add(botResponse);
    } catch (err) {
      // Log the failure and give the user a recoverable reply instead of
      // letting the webhook time out.
      console.log(err);
      agent.add('Sorry. Something went wrong. Can you say that again?');
    }
  }
});
// Listen on the platform-assigned port when one is provided (hosting
// platforms such as Repl.it/Heroku set PORT); fall back to 3000 for
// local development. `??` keeps the default only for missing values.
const port = process.env.PORT ?? 3000;
app.listen(port, () => console.log(`App listening on port ${port}!`));
Thank you.