How can I handle continuous chat with ChatGPT in Next.js?

I created a voice assistant using ChatGPT. It is working fine, but I would like it to handle follow-up questions, like in this example:

User - who is Sachin Tendulkar
GPT - Sachin Ramesh Tendulkar is an Indian former international cricketer who captained the Indian national team, and so on …
User - what is his age
GPT - he is 50 years old, and so on …

So the second answer should use the context of the first question ("his" = Sachin Tendulkar).
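My understanding is that the model does not remember anything between requests, so I would have to send the earlier turns back with every call. I imagine the second request would effectively need to carry something like this (just my assumption about the chat messages format, not code I have written yet):

// What I think the model needs to receive on the second turn
const messages = [
    { role: "user", content: "who is Sachin Tendulkar" },
    { role: "assistant", content: "Sachin Ramesh Tendulkar is an Indian former international cricketer ..." },
    { role: "user", content: "what is his age" },
];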

I have coded it in Next.js. This is my client-side function that calls my API route:

async function gpt(prompt) {
    try {
        setGptloading(true);

        const response = await fetch("/api/chat", {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
            },
            body: JSON.stringify({ prompt }),
        });

        if (!response.ok) {
            toast.error(`Error in gpt: ${response.statusText}`);
            setGptloading(false);
            return;
        }
        setText("");

        // Read the streamed response body as decoded text chunks
        const reader = response.body
            .pipeThrough(new TextDecoderStream())
            .getReader();

        setIsListening(false);
        setJustdata(true);
        setGptloading(false);

        while (true) {
            if (streamingStoppedRef.current) {
                break;
            }
            const { value, done } = await reader.read();
            if (done) break;
            // Append each chunk to the text shown in the UI
            setText((prev) => prev + value);
        }
    } catch (error) {
        console.error("Error in gpt function:", error);
        toast.error("Error in gpt function");
        setGptloading(false);
    }
}
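What I think I need on the client is to keep the whole conversation in React state and send it along with every request, roughly like this. This is only a sketch inside the same component (setText, streaming, etc. as above); the names messages, setMessages and gptWithHistory are made up by me and not in my current code:

const [messages, setMessages] = useState([]);

async function gptWithHistory(prompt) {
    // add the new user turn to the history before calling the API
    const nextMessages = [...messages, { role: "user", content: prompt }];
    setMessages(nextMessages);

    const response = await fetch("/api/chat", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        // send the whole history instead of only the latest prompt
        body: JSON.stringify({ messages: nextMessages }),
    });

    const reader = response.body
        .pipeThrough(new TextDecoderStream())
        .getReader();

    let assistantText = "";
    while (true) {
        const { value, done } = await reader.read();
        if (done) break;
        assistantText += value;
        setText((prev) => prev + value);
    }

    // keep the assistant's reply so the next question has its context
    setMessages((prev) => [...prev, { role: "assistant", content: assistantText }]);
}

And this is my API route at /api/chat: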
import { Configuration, OpenAIApi } from 'openai-edge';
import { OpenAIStream, StreamingTextResponse } from 'ai';

// Create an OpenAI API client (that's edge friendly!)
const config = new Configuration({
  apiKey: process.env.CHATGPT_API_KEY,
});
const openai = new OpenAIApi(config);
// Set the runtime to edge for best performance
export const runtime = 'edge';

export default async function handler(req, context) {
  const { prompt } = await req.json();

  // Ask OpenAI for a streaming completion given the prompt
  const response = await openai.createChatCompletion({
    model: 'gpt-3.5-turbo',
    n: 1,
    temperature: 0.3,
    stream: true,
    messages: [
      {
        role: 'user',
        content: prompt,
      },
    ],
  });

  // Convert the response into a friendly text-stream
  const stream = OpenAIStream(response);
  // Respond with the stream
  return new StreamingTextResponse(stream);
}
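If the client sent a messages array with the whole history, I assume the route would only need a small change, something like this (a sketch only, using the same openai-edge / ai setup as above):

export default async function handler(req) {
  const { messages } = await req.json();

  const response = await openai.createChatCompletion({
    model: 'gpt-3.5-turbo',
    temperature: 0.3,
    stream: true,
    // pass every previous turn so follow-up questions keep their context
    messages,
  });

  return new StreamingTextResponse(OpenAIStream(response));
}

This is where I render the streamed response: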
<div className="Gpt_response" ref={textRef}>
    <p id="chatgptId" dangerouslySetInnerHTML={{ __html: text }} />
</div>

Here I am getting the response for a single prompt, but every new question is answered without the previous context. How can I make the chat continuous?