How can I get the same kind of answers as platform.openai.com/playground/assistants in an API call?

I created an assistant in platform.openai.com/playground/assistants and uploaded 23 PDF files. When I test the assistant in the PLATFORM I get good answers, but when I tried to use it via the API I did not get correct answers.

Here is a sample of my code showing how I use it:

class OpenAIAssistant:
    """Wrap an OpenAI Assistants-API assistant behind a simple streaming Q&A interface.

    One thread is created per instance, attached to the vector store named by
    ``settings.VECTOR_STORE_ID`` so the assistant's ``file_search`` tool can
    retrieve from the uploaded documents.
    """

    def __init__(self, api_key, assistant_id):
        """Build the client, verify the assistant exists, and open a thread.

        Args:
            api_key: OpenAI API key used to construct the client.
            assistant_id: ID of the pre-configured assistant to query.

        Raises:
            OpenAIError: if the assistant cannot be retrieved or the
                thread cannot be created.
        """
        self.client = OpenAI(api_key=api_key)
        self.assistant_id = assistant_id
        self.console = Console()

        try:
            # Fail fast if the assistant ID is wrong or inaccessible.
            self.assistant_details = self.client.beta.assistants.retrieve(
                assistant_id=self.assistant_id
            )
        except OpenAIError as e:
            self.console.print(f"Error retrieving assistant details: {e}", style="red")
            raise  # bare raise keeps the original traceback intact

        try:
            # Attach the vector store so file_search can actually see the
            # uploaded PDFs — without this the run answers from the model
            # alone, which is the usual cause of "worse answers than the
            # Playground".
            self.thread = self.client.beta.threads.create(
                tool_resources={
                    "file_search": {
                        "vector_store_ids": [settings.VECTOR_STORE_ID]
                    }
                }
            )
        except OpenAIError as e:
            self.console.print(f"Error creating thread: {e}", style="red")
            raise

    def ask_question(self, question, is_mobile=False):
        """Send *question* to the assistant and yield response chunks as they arrive.

        Args:
            question: The user's question text.
            is_mobile: When True, append the mobile prompt suffix instead of
                the web one.

        Yields:
            str: raw response chunks, or a single ``"Error: ..."`` string if
            the request fails.
        """
        logging.debug(f"Question received: {question}")
        event_handler = MyEventHandler()

        # NOTE(review): settings.CHAT_WEB_PTOMPT looks like a typo for
        # CHAT_WEB_PROMPT — confirm against the settings module before renaming.
        suffix = settings.CHAT_MOBILE_PROMPT if is_mobile else settings.CHAT_WEB_PTOMPT

        try:
            # Post the user message to the persistent thread.
            self.client.beta.threads.messages.create(
                thread_id=self.thread.id,
                role="user",
                content=question + suffix,
            )
            logging.debug(f"Message sent to OpenAI: {question}")

            # Stream the run, draining chunks the event handler buffers.
            # NOTE(review): passing ``instructions`` here OVERRIDES the
            # instructions configured on the assistant in the Playground —
            # if ASSISTANT_INSTRUCTIONS differs from the Playground setup,
            # this alone explains divergent answers. Verify they match.
            with self.client.beta.threads.runs.stream(
                thread_id=self.thread.id,
                assistant_id=self.assistant_id,
                instructions=settings.ASSISTANT_INSTRUCTIONS,
                event_handler=event_handler,
            ) as stream:
                for _ in stream:
                    while event_handler.response_chunks:
                        chunk = event_handler.response_chunks.pop(0)
                        logging.debug(f"Chunk received from OpenAI: {chunk}")
                        yield chunk

            # Flush anything buffered after the final stream event so the
            # tail of the answer is never dropped.
            while event_handler.response_chunks:
                chunk = event_handler.response_chunks.pop(0)
                logging.debug(f"Chunk received from OpenAI: {chunk}")
                yield chunk
        except Exception as e:
            # logging.exception records the full traceback, not just the message.
            logging.exception("Error in ask_question")
            yield f"Error: {e}"