OpenAI embedding model answering issue

"Hello,

I am using OpenAI’s embedding-based model to train on our context and answer context-related questions only. However, after the chat completion step, when I ask the same question several times it does not always give the same answer. Sometimes it responds with ‘I don’t know,’ as we set in our prompt, but if I repeat the question once more it gives the correct answer. Are there any methods to overcome this issue?

Thank you."

Hey mate,

Can I clarify a little further?

Firstly, you said you are using an embedding-based model? I am going to assume that you aren’t using ada-002, because that doesn’t make sense as it isn’t fine-tunable and doesn’t respond in text…

Do you mind sending a snippet of your code so we can have a look? Or could you provide some more specific information?

I would also like to set the page to Chinese. May I know where to set that path?
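Here is my code. The functions below rely on a few imports and globals that aren't shown; roughly the following, assuming Haystack 1.x and the usual Cookbook model names (the fallback text in responseMessage is a placeholder):

import ast
import os

import openai
import pandas as pd
import tiktoken
from scipy import spatial
from haystack.document_stores import InMemoryDocumentStore
from haystack.nodes import BM25Retriever
from haystack.pipelines import ExtractiveQAPipeline
from haystack.pipelines.standard_pipelines import TextIndexingPipeline

# Assumed configuration (not shown in the original snippet)
GPT_MODEL = "gpt-3.5-turbo"
EMBEDDING_MODEL = "text-embedding-ada-002"
responseMessage = "I could not find an answer."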

def ask(
    query: str,
    openai_file_path: str,
    df: pd.DataFrame = None,
    model: str = GPT_MODEL,
) -> str:
    """Answers a query using GPT and a dataframe of relevant texts and embeddings."""
    # Load the precomputed embeddings; note this overwrites any df passed in
    embeddings_path = openai_file_path
    df = pd.read_csv(embeddings_path)
    df['embedding'] = df['embedding'].apply(ast.literal_eval)
    message = query_message(query, df)
    messages = [
        {"role": "system", "content": "You answer questions about any FAQ."},
        {"role": "user", "content": message},
    ]

    print(message)
    print("#################################")
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,
    )
    response_message = response["choices"][0]["message"]["content"]
    # The embeddings file is deleted after every call
    os.remove(openai_file_path)
    return response_message
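
A rough example of how ask() gets called (the CSV path and the question are just placeholders):

# Hypothetical call; "faq_embeddings.csv" is a placeholder path
answer = ask(
    query="How do I reset my password?",
    openai_file_path="faq_embeddings.csv",
)
print(answer)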


def query_message(
    query: str,
    df: pd.DataFrame,
) -> str:
    """Return a message for GPT, with relevant source texts pulled from a dataframe."""
    strings, relatednesses = strings_ranked_by_relatedness(query, df)
    introduction = f'Use the below articles on any FAQ to answer the subsequent question. If the answer cannot be found in the articles, write "{responseMessage}"'
    question = f"\n\nQuestion: {query}"
    message = introduction
    for string in strings:
        # Wrap each retrieved article in triple quotes so the model can tell them apart
        next_article = f'\n\n"""\n{string}\n"""'
        message += next_article
    return message + question


def strings_ranked_by_relatedness(
    query: str,
    df: pd.DataFrame,
    relatedness_fn=lambda x, y: 1 - spatial.distance.cosine(x, y),
    top_n: int = 100,
) -> tuple[list[str], list[float]]:
    """Returns a list of strings and relatednesses, sorted from most related to least."""
    # Embed the query, then compare it against every precomputed row embedding
    query_embedding_response = openai.Embedding.create(
        model=EMBEDDING_MODEL,
        input=query,
    )
    query_embedding = query_embedding_response["data"][0]["embedding"]
    strings_and_relatednesses = [
        (row["text"], relatedness_fn(query_embedding, row["embedding"]))
        for _, row in df.iterrows()
    ]
    strings_and_relatednesses.sort(key=lambda x: x[1], reverse=True)
    strings, relatednesses = zip(*strings_and_relatednesses)
    return strings[:top_n], relatednesses[:top_n]

def num_tokens(text: str, model: str = GPT_MODEL) -> int:
    """Return the number of tokens in a string."""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))
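
num_tokens() isn't actually used by query_message() above; presumably it is meant to keep the prompt inside the model's context window. A sketch of how it could be wired in, assuming a token_budget parameter (the budget value is a guess):

# Sketch only: stop adding articles once the prompt would exceed a token budget
def query_message_budgeted(query: str, df: pd.DataFrame, token_budget: int = 4096 - 500) -> str:
    strings, relatednesses = strings_ranked_by_relatedness(query, df)
    introduction = f'Use the below articles on any FAQ to answer the subsequent question. If the answer cannot be found in the articles, write "{responseMessage}"'
    question = f"\n\nQuestion: {query}"
    message = introduction
    for string in strings:
        next_article = f'\n\n"""\n{string}\n"""'
        if num_tokens(message + next_article + question) > token_budget:
            break  # adding this article would exceed the budget
        message += next_article
    return message + question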



# Function to run the Haystack extractive QA (BERT reader) over the training file
def get_nlp_filtered_bert(_message, reader, filepath):

    # Initialize the in-memory document store with BM25 support
    document_store = InMemoryDocumentStore(use_bm25=True)

    training_file_path = filepath

    # Index the training file
    indexing_pipeline = TextIndexingPipeline(document_store)
    indexing_pipeline.run_batch(file_paths=[training_file_path])

    # Initialize the retriever
    retriever = BM25Retriever(document_store=document_store)

    # Create the pipeline
    pipe = ExtractiveQAPipeline(reader, retriever)

    # Provide a query and get predictions
    query = _message
    prediction = pipe.run(query=query, params={"Retriever": {"top_k": 5}, "Reader": {"top_k": 1}})
    answers = prediction["answers"]
    no_ans = prediction["no_ans_gap"]
    # The training file is deleted after every call
    os.remove(filepath)

    # Return the "not found" message when "no answer" scores higher than any prediction
    if no_ans < 0:
        return responseMessage

    # Return the first (top-ranked) predicted answer
    for answer in answers:
        return answer.answer

    # Fall back to the "not found" message if no answers were returned
    return responseMessage
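
And a rough sketch of how the Haystack part gets called, assuming a FARMReader from Haystack 1.x (the reader model and the training file path are placeholders):

from haystack.nodes import FARMReader

# Hypothetical example: the reader model and training file are placeholders
reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=False)
answer = get_nlp_filtered_bert("How do I reset my password?", reader, "training_data.txt")
print(answer)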