How can I integrate DALL-E into my Assistant created with the Assistants API?

I am trying to integrate DALL-E with the Assistants API:

from openai import OpenAI
from dotenv import load_dotenv

import os
import time

from lib.common import getPurpoce,getUniqueIdentifier,yes_or_no_prompt
from lib.image_generation import mkImage

load_dotenv()

thread_id = None

# Set your API key
client = OpenAI(
    # This is the default and can be omitted
    api_key=os.environ.get("OPENAI_API_KEY"),
)

def setMessageToThread(thread_id,message):

    if thread_id is None:
        print("Creating thread")
        thread = client.beta.threads.create(messages=[{'role': 'user', 'content': message}])
        thread_id = thread.id
    else:
        client.beta.threads.messages.create(
            thread_id,
            role="user",
            content=message,
        )

    return thread_id

def executingThread(thread_id,assistant_id):
    global client

    print("Creating Response")
    run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=assistant_id,
    )

    while True:
        run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
        if run_status.status == "completed":
            return run.id
        elif run_status.status == "failed":
            print("Run failed:", run_status.last_error)
            return None
        time.sleep(2)

def sendMessage(assistant_id,message):

    message = message.strip()
    if not message:
        return None

    global thread_id,client

    thread_id = setMessageToThread(thread_id,message)

    run_id = executingThread(thread_id,assistant_id)
    if run_id is not None:
        response = client.beta.threads.messages.list(thread_id=thread_id,run_id=run_id)
        return response

    return None

def getAssistantId(directory):
    file = f"appdata/{getUniqueIdentifier(directory)}/assistant_id"

    assistant_id = None
    if os.path.isfile(file):
        with open(file, 'r') as fp:
            assistant_id = fp.read()

    return assistant_id

def print_messages(messages):
    for message in reversed(messages.data):
        role = message.role
        for content in message.content:
            if content.type == 'text':
                response = content.text.value
                print(f'\n{role}:\n \033[96m{response}\033[0m')
            else:
                print(content.type)
                print(content)

def extract_latest_response_from_assistant(response)->str:
    return response.data[0].content[0].text.value

if __name__ == "__main__":

    option = getPurpoce()
    directory = f"files/{option}"

    assistant_id = getAssistantId(directory)

    if assistant_id is None:
        print("Assistant Id not Found")
        exit(-1)

    while True:
        print("++++++++++++++++++++++++++++++++++++++++++++")
        message = input("Enter your message to the ChatGPT Assistant\n")
        response = sendMessage(assistant_id, message)

        if response is None:
            print("Αδυναμία Απάντησης")
        else:
            message = extract_latest_response_from_assistant(response)
            print(f'\033[96m{message}\033[0m')
            if yes_or_no_prompt("Do you want to create an image for the response"):
                print("Creating Image")
                image = mkImage(client,message)
                print(f"Image available at: \033[96m{image}\033[0m")

What I do is accept the chat via CLI input and then let the user choose whether to create an image using mkImage. The image helpers look like this:


def getImagePrompt(client,prompt):

    # Greek prompt, roughly: "Create me a prompt that will generate an image for the following post"
    descriptionPrompt = f"Δημιούργησέ μου ένα prompt που θα δημιουργεί μια εικόνα για το κάτωθι Post:\n {prompt}"
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are an assistant that created dall-E image descriptions."},
            {"role": "user", "content": descriptionPrompt}
        ]
    )

    return completion.choices[0].message.content


def mkImage(client,prompt):

    prompt = prompt.strip()

    if prompt == '':
        raise ValueError("Prompt should not be empty")

    imgPrompt = getImagePrompt(client,prompt)
    print(imgPrompt)
    response = client.images.generate(
        model="dall-e-3",
        prompt=imgPrompt,
        n=1,
        size="1024x1024"
    )

    return response.data[0].url

The assistant was created like this:

import os

from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()

client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)

# Vector store Id and Assistant Id I already have at OpenAI (placeholders)
vector_id = "xxxx"
assistant_id = "xxxx"

client.beta.assistants.update(
    assistant_id,
    description="Some Description",
    instructions="Chat Instructions",
    model="gpt-4o",
    tools=[{"type": "file_search"}],
    tool_resources={"file_search": {"vector_store_ids": [vector_id]}},
)

What I do is create the chat response with the Assistant, then let Chat Completions write the DALL-E image description, and then forward that description to DALL-E.

But this costs excess API calls: every turn with an image means an Assistants run, a separate chat completion for the image prompt, and the image generation itself. How can I seamlessly integrate DALL-E into my Assistant?
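
My guess is that the answer involves registering image generation as a function tool on the assistant and handling runs that stop with the requires_action status, but I am not sure this is the intended approach. Below is a rough sketch of what I imagine, reusing client, thread_id, run and assistant_id from the code above; the tool name generate_image and its JSON schema are my own invention, not something the API provides:

import json

# Guess: declare an image-generation function tool so the assistant itself decides when to draw
client.beta.assistants.update(
    assistant_id,
    tools=[
        {"type": "file_search"},
        {"type": "function", "function": {
            "name": "generate_image",
            "description": "Generate a DALL-E image for the given description",
            "parameters": {
                "type": "object",
                "properties": {"prompt": {"type": "string"}},
                "required": ["prompt"],
            },
        }},
    ],
)

# Guess: inside executingThread's polling loop, handle the extra run status
run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
if run_status.status == "requires_action":
    tool_outputs = []
    for call in run_status.required_action.submit_tool_outputs.tool_calls:
        if call.function.name == "generate_image":
            args = json.loads(call.function.arguments)
            image = client.images.generate(
                model="dall-e-3", prompt=args["prompt"], n=1, size="1024x1024"
            )
            tool_outputs.append({"tool_call_id": call.id, "output": image.data[0].url})
    client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs
    )

Is handling the function tool call like this the recommended way, or is there a more direct way to attach DALL-E to an assistant without the extra round trip through submit_tool_outputs?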