OK, I'll put the entire code below. I am using Next.js, Pinecone, and OpenAI.
Basically, once I get the matches back from the data in Pinecone, what do I do with them to turn them into a proper natural-language response from OpenAI?
For example, the last few lines of my code look like this; what comes next?
const embeddingResult2 = await openai.embeddings.create({
  input: message,
  model: "text-embedding-ada-002",
});
const vector2 = embeddingResult2.data[0].embedding;

const pineconeResult2 = await pinecone.index("test1").query({
  vector: vector2,
  topK: 5,
  includeMetadata: true, // so each match carries its question/answer metadata
});
const matches = pineconeResult2.matches; // array of matches
console.log(matches);
^^^ How do I now feed this to OpenAI so I get a natural-language answer back instead of just an array?
Or, more generally, how is this supposed to work? Am I on the right track?
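My guess is that the matches are only there to build context for a normal chat completion, something like the sketch below. This is untested; the model name and prompt wording are placeholders I made up, and it assumes the query was run with includeMetadata: true so each match carries its question/answer metadata.

// Rough sketch of what I think the missing step is (untested):
// turn the top matches into a context string, then ask the chat model
// to answer the user's message using that context.
const context = matches
  .map((m) => `Q: ${m.metadata.question}\nA: ${m.metadata.answer}`)
  .join("\n\n");

const chatResult = await openai.chat.completions.create({
  model: "gpt-3.5-turbo", // placeholder model name
  messages: [
    {
      role: "system",
      content: `Answer the user's question using only this Q&A context:\n\n${context}`,
    },
    { role: "user", content: message },
  ],
});

const answer = chatResult.choices[0].message.content; // plain natural-language text

Is that the right idea, or is there some other way the matches are supposed to be passed back to OpenAI?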
STEPS AND LONG CODE HERE:
1. I receive a message from the frontend (leave that part aside for now).
2. I have Q&A data that I need to prepare: slice it, chunk it into batches of about 2048 tokens (is that OK?), put the chunks in an array, create embeddings, concatenate them, and build vectors from them. (The data shape I'm assuming is sketched right after this list.)
3. Feed the embeddings/vectors into an already created Pinecone index.
4. Create another embedding, this time from the message sent by the frontend.
5. Feed that new embedding to a Pinecone query against my existing index.
6. Get the Pinecone response/matches back, which is an array.
7. Now what? How does OpenAI generate a response from that?
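For reference, this is the shape I'm assuming for one entry in utils/bot.json and the Pinecone record my code builds from it (the example values are made up):

// Made-up example of one entry in utils/bot.json (the shape my code assumes):
const exampleEntry = {
  id: "q-001",
  question: "What are your opening hours?",
  answer: "9am to 5pm, Monday to Friday.",
};

// The matching Pinecone record built from it:
const exampleRecord = {
  id: exampleEntry.id, // Pinecone ids must be strings
  values: new Array(1536).fill(0), // stand-in for the real 1536-number ada-002 embedding
  metadata: { question: exampleEntry.question, answer: exampleEntry.answer },
};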
import { Pinecone } from "@pinecone-database/pinecone";
import { OpenAI } from "openai";
import Bot from "../../utils/bot.json";

const Data = async (req, res) => {
  const message = req.body.newText;
  const firstOne = req.body.firstMsg;

  const pinecone = new Pinecone({
    apiKey: process.env.PINECONE_API_KEY,
    environment: process.env.PINECONE_ENVIRONMENT,
  });
  const openai = new OpenAI({
    apiKey: process.env.OPENAI_API_KEY,
  });

  // Copy the Q&A data so I can consume it batch by batch.
  let inputs = Bot.slice();
  let embeddings = [];
  // Batch the entries so each embeddings request stays under my 2048-token budget,
  // then embed batch by batch and collect all the vectors in order.
  while (inputs.length) {
    let tokenCount = 0;
    let batchedInputs = [];
    while (inputs.length && tokenCount < 2048) {
      let input = inputs.shift();
      // Each record in bot.json is assumed to have question/answer fields; embed them together.
      let text = `${input.question} ${input.answer}`;
      batchedInputs.push(text);
      tokenCount += text.split(" ").length; // rough estimate: word count, not real tokens
    }

    const embeddingResult = await openai.embeddings.create({
      input: batchedInputs,
      model: "text-embedding-ada-002",
    });
    console.log(embeddingResult);

    embeddings = embeddings.concat(
      embeddingResult.data.map((entry) => entry.embedding)
    );
  }
  // Build one Pinecone record per Q&A entry, keeping the original text as metadata.
  let vectors = Bot.map((bot, i) => {
    return {
      id: String(bot.id), // Pinecone ids must be strings
      metadata: {
        question: bot.question,
        answer: bot.answer,
      },
      values: embeddings[i],
    };
  });

  // Upsert the records into the existing index in batches of 250.
  let insertBatches = [];
  const index = pinecone.index("test1");
  while (vectors.length) {
    let batch = vectors.splice(0, 250);
    let indexResults = await index.upsert(batch);
    insertBatches.push(indexResults);
  }
  console.log(insertBatches);

  // Embed the incoming message the same way, then query the index for the closest Q&A entries.
  const embeddingResult2 = await openai.embeddings.create({
    input: message,
    model: "text-embedding-ada-002",
  });
  const vector2 = embeddingResult2.data[0].embedding;

  const pineconeResult2 = await pinecone.index("test1").query({
    vector: vector2,
    topK: 5,
    includeMetadata: true, // so each match comes back with its question/answer metadata
  });
  const matches = pineconeResult2.matches; // array of { id, score, metadata } objects
  console.log(matches);
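  // ^^^ This is where I'm stuck: I'm guessing the chat-completion sketch from the top
  // of this post goes right here, and then something like res.status(200).json({ answer })
  // sends the generated text back to the frontend. Is that right?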
};

export default Data;