Hello all,
I am developing a chat app that uses ChromaDB as the vector database retriever with `create_retrieval_chain`.
Question:
- How can we inspect the data stored in the vector store?
- How can we check whether the question retrieved any supporting documents from the vector DB retriever?
# Load the Chroma vector store that backs retrieval.
vector_db = get_vector_db()

# Set up the chat model; the API key and model name are read from the
# environment (OPENAI_API_KEY, OPENAI_API_GPT_MODEL).
llm = ChatOpenAI(
    api_key=os.environ["OPENAI_API_KEY"],
    model_name=os.environ["OPENAI_API_GPT_MODEL"],
    temperature=0.2,  # low temperature keeps answers close to the retrieved context
)
# Prompt for the answer-generation ("stuff") chain: retrieved documents are
# injected into {context}, the conversation so far into chat_history, and the
# latest user message into {input}.
answer_messages = [
    ("system", "Answer the user's questions based on the below context:\n\n{context}"),
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}"),
]
document_chain_prompt = ChatPromptTemplate.from_messages(answer_messages)

# Chain that stuffs the retrieved documents into the prompt and calls the LLM.
document_chain = create_stuff_documents_chain(llm, document_chain_prompt)
# Prompt that rewrites the latest user message into a standalone search query,
# taking the prior conversation into account.
history_aware_retriever_chain_prompt = ChatPromptTemplate.from_messages(
    [
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        (
            "user",
            "Given the above conversation, generate a search query to look up to get information relevant to the conversation",
        ),
    ]
)
# Retriever over the Chroma store: keep only documents whose relevance score
# clears the threshold, returning at most `related_doc_count` of them.
# NOTE(review): with Chroma, confirm that "similarity_score_threshold" yields
# relevance scores normalized to [0, 1] for your distance metric — otherwise
# the 0.5 cutoff may filter everything or nothing; verify against the store's
# configuration.
retriever_search_kwargs = {
    "score_threshold": 0.5,
    "k": int(os.environ["related_doc_count"]),
}
vector_db_retriever = vector_db.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs=retriever_search_kwargs,
)

# Wrap the retriever so the search query is generated from the full chat
# history rather than from the raw latest message alone.
history_aware_retriever_chain = create_history_aware_retriever(
    llm, vector_db_retriever, history_aware_retriever_chain_prompt
)
# Combine query rewriting + retrieval with the answer-generation chain.
retrieval_chain = create_retrieval_chain(history_aware_retriever_chain, document_chain)

# Run the full chain with the conversation so far and the new user input.
response = retrieval_chain.invoke({
    "chat_history": chat_history,
    "input": prompt,
})

# create_retrieval_chain returns the documents it fed to the LLM under the
# "context" key (alongside "input" and "answer"). An empty list means the
# retriever found no supporting documents above the score threshold for this
# question — the answer is then generated from the model's own knowledge only.
# Inspect or log `retrieved_docs` to verify retrieval is actually working.
retrieved_docs = response.get("context", [])

# Record the exchange so the next turn sees the updated conversation.
chat_history.append(HumanMessage(content=prompt))
chat_history.append(AIMessage(content=response["answer"]))