Migrate assistant to the Responses API?

I have a working assistant which analyzes a .txt file using the following code:

import os, sys, time
from dotenv import load_dotenv
from openai import OpenAI

path = os.path.abspath(os.path.dirname(sys.argv[0]))  
load_dotenv(os.path.join(path, ".env"))
CHATGPT_API_KEY = os.environ.get("CHATGPT_API_KEY")

client = OpenAI(api_key = CHATGPT_API_KEY)
fn = os.path.join(path, "prompt.txt")
with open(fn, encoding="utf-8", errors="ignore") as f:
  lines = f.read().splitlines()
question = "\n".join(lines)

### create content using openai
fn = os.path.join(path, "transscript.txt")
vector_store = client.vector_stores.create(name="TXT-File")
file_paths = [fn]
file_streams = [open(fp, "rb") for fp in file_paths]
file_batch = client.vector_stores.file_batches.upload_and_poll(
  vector_store_id=vector_store.id, files=file_streams
)
print(file_batch.status)
print(file_batch.file_counts)

print(f"Preparing assistant")
assistant = client.beta.assistants.create(
  name="Document Analyse Assistant",
  instructions="You are a machine learning researcher, answer questions about the provided txt-file",
  model="gpt-4o-mini",
  tools=[{"type": "file_search"}],
)

assistant = client.beta.assistants.update(
  assistant_id=assistant.id,
  tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}},
)

print(f"Preparing thread")
thread = client.beta.threads.create()
print(f"Preparing question")
results = client.beta.threads.messages.create(
  thread_id = thread.id,
  role = "user",
  content = question
)

print(f"Running for answer")
run = client.beta.threads.runs.create(
  thread_id = thread.id,
  assistant_id = assistant.id
)

while run.status not in ["completed", "failed", "cancelled", "expired"]:
  time.sleep(0.5)  # brief pause between polls so the loop does not hammer the API
  run = client.beta.threads.runs.retrieve(
    thread_id = thread.id,
    run_id = run.id
  )

if run.status == "completed":
  results = client.beta.threads.messages.list(
      thread_id=thread.id
  )
  resultAnswer = results.data[0].content[0].text.value
  print(resultAnswer)
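
Side note: the manual polling loop can also be replaced by the SDK's create_and_poll helper. A minimal sketch, assuming the same thread and assistant objects as above:

run = client.beta.threads.runs.create_and_poll(
  thread_id = thread.id,
  assistant_id = assistant.id
)
# Blocks until the run reaches a terminal state, then returns the final run.
print(run.status)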

Now I get several deprecation warnings:

C:\DEVNEU\Fiverr2025\TRY\blankpraxis\test1.py:47: DeprecationWarning: The Assistants API is deprecated in favor of the Responses API
thread = client.beta.threads.create()
Preparing question
C:\DEVNEU\Fiverr2025\TRY\blankpraxis\test1.py:49: DeprecationWarning: The Assistants API is deprecated in favor of the Responses API
results = client.beta.threads.messages.create(
Running for answer
C:\DEVNEU\Fiverr2025\TRY\blankpraxis\test1.py:55: DeprecationWarning: The Assistants API is deprecated in favor of the Responses API
run = client.beta.threads.runs.create (
C:\DEVNEU\Fiverr2025\TRY\blankpraxis\test1.py:60: DeprecationWarning: The Assistants API is deprecated in favor of the Responses API
run = client.beta.threads.runs.retrieve (
C:\DEVNEU\Fiverr2025\TRY\blankpraxis\test1.py:65: DeprecationWarning: The Assistants API is deprecated in favor of the Responses API
results = client.beta.threads.messages.list(

I tried to follow the migration guide from:
https://platform.openai.com/docs/assistants/migration?user-chat-app=assistants#migrating-your-integration

But how can I upload the file which should be analyzed?
I tried it with this changed code, but it did not work properly:

import os, sys
from dotenv import load_dotenv
from openai import OpenAI

path = os.path.abspath(os.path.dirname(sys.argv[0]))
fn = os.path.join(path, ".env")
load_dotenv(fn)
CHATGPT_API_KEY = os.environ.get("CHATGPT_API_KEY")
client = OpenAI(api_key = CHATGPT_API_KEY)

fn = os.path.join(path, "prompt.txt")
with open(fn, encoding="utf-8", errors="ignore") as f:
  lines = f.read().splitlines()
question = "\n".join(lines)

### create content using openai
fn = os.path.join(path, "transscript.txt")
vector_store = client.vector_stores.create(name="TXT-File")
file_paths = [fn]
file_streams = [open(fp, "rb") for fp in file_paths]
file_batch = client.vector_stores.file_batches.upload_and_poll(
  vector_store_id=vector_store.id, files=file_streams
)

print(file_batch.status)
print(file_batch.file_counts)

response = client.responses.create(
  model="gpt-4.1",
  input=[{"role": "user", "content": question}],
  tools=[{"type": "file_search"}],
  tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}}
)
print(response)

I get this error:

(openaiALL) C:\DEVNEU\Fiverr2025\TRY\blankpraxis>python test2.py
completed
FileCounts(cancelled=0, completed=1, failed=0, in_progress=0, total=1)
Traceback (most recent call last):
  File "C:\DEVNEU\Fiverr2025\TRY\blankpraxis\test2.py", line 29, in <module>
    response = client.responses.create(
      model="gpt-4.1",
      ...<2 lines>...
      tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}}
    )
TypeError: Responses.create() got an unexpected keyword argument 'tool_resources'

You’re very close — the issue is mostly conceptual.

In the Responses API, you don’t “attach” tool_resources at call time like in Assistants.
Instead, you either:

  1. Reference files directly in the input, or

  2. Use file_search with vector stores that are already associated implicitly.

That’s why tool_resources raises an error — it’s not a valid argument for responses.create().

For file-based analysis, the recommended flow is:

  • Upload files (or build a vector store)

  • Let file_search resolve context automatically

  • Pass only tools=[{"type": "file_search"}] in the response call

No threads, no runs, no polling loop — Responses is single-call and stateless by design.

If you need multi-turn state or orchestration, that logic now lives in your app, not the API.

This is less a 1:1 migration and more a shift from “managed assistant lifecycle” to “explicit app-controlled flow.”
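
For example, a minimal sketch of that app-controlled flow for a multi-turn exchange, assuming the client and question from your snippets (the follow-up prompt is just an illustration):

# Keep the conversation history in your app and resend it on every call;
# in this pattern nothing is stored between calls on the API side.
history = [{"role": "user", "content": question}]
first = client.responses.create(model="gpt-4o-mini", input=history)
history.append({"role": "assistant", "content": first.output_text})

# A hypothetical follow-up turn, appended by your app, not by the API.
history.append({"role": "user", "content": "Summarize that in one sentence."})
second = client.responses.create(model="gpt-4o-mini", input=history)
print(second.output_text)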

Thanks a lot for your response -

I tried it now with this code:

import os, sys
from dotenv import load_dotenv
from openai import OpenAI

path = os.path.abspath(os.path.dirname(sys.argv[0])) 
fn = os.path.join(path, ".env")
load_dotenv(fn)
CHATGPT_API_KEY = os.environ.get("CHATGPT_API_KEY")
client = OpenAI(api_key = CHATGPT_API_KEY)

fn = os.path.join(path, "prompt.txt")
with open(fn, encoding="utf-8", errors="ignore") as f:
  lines = f.read().splitlines()
question = "\n".join(lines)

### create content using openai
fn = os.path.join(path, "transscript.txt")
vector_store = client.vector_stores.create(name="TXT-File")
file_paths = [fn]
file_streams = [open(fp, "rb") for fp in file_paths]
file_batch = client.vector_stores.file_batches.upload_and_poll(
  vector_store_id=vector_store.id, files=file_streams
)
print(file_batch.status)
print(file_batch.file_counts)

response = client.responses.create(
  model="gpt-4o-mini",
  input=[{"role": "user", "content": question}],
  tools=[{"type": "file_search"}]
)
print(response)

But I still get an error:

(openaiALL) C:\DEVNEU\Fiverr2025\TRY\blankpraxis>python test2.py
completed
FileCounts(cancelled=0, completed=1, failed=0, in_progress=0, total=1)
Traceback (most recent call last):
  File "C:\DEVNEU\Fiverr2025\TRY\blankpraxis\test2.py", line 29, in <module>
    response = client.responses.create(
      model="gpt-4o-mini",
      input=[{"role": "user", "content": question}],
      tools=[{"type": "file_search"}]
    )
  File "C:\DEVNEU\.venv\openaiALL\Lib\site-packages\openai\resources\responses\responses.py", line 773, in create
    return self._post(
      "/responses",
      ...<38 lines>...
      stream_cls=Stream[ResponseStreamEvent],
    )
  File "C:\DEVNEU\.venv\openaiALL\Lib\site-packages\openai\_base_client.py", line 1259, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
  File "C:\DEVNEU\.venv\openaiALL\Lib\site-packages\openai\_base_client.py", line 1047, in request
    raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': "Missing required parameter: 'tools[0].vector_store_ids'.", 'type': 'invalid_request_error', 'param': 'tools[0].vector_store_ids', 'code': 'missing_required_parameter'}}

Any idea why this is still happening?
Do I have to use an additional parameter in the request?

I think I found the issue. With this call it finally seems to work fine:
Thanks for your help!

resp = client.responses.create(
  model="gpt-4o-mini",
  input=[{"role": "user", "content": question}],
  tools=[{"type": "file_search", "vector_store_ids": [vector_store.id]}],
)
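
One last tip: if you only want the model's answer text rather than the whole Response object, the Python SDK exposes an output_text convenience property. A short sketch reusing the resp and vector_store from above (the cleanup call is optional):

# Print just the aggregated text of the answer instead of the full object.
print(resp.output_text)

# Optional cleanup so test runs don't accumulate vector stores.
client.vector_stores.delete(vector_store_id=vector_store.id)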