Hi team, I am trying to attach files for use in the code interpreter while creating a new Response object however I am getting an ‘unknown_parameter’ error despite following the exact format of the documentation.
First off:
Code interpreter has a horrible bug. If you use the “auto” method for container creation, but then give chat inputs that don’t call the tool, the API continues creating new container IDs that are never provided to you in the API call response. You merely get billed an additional $0.03 for every input of “hello” or “what is the smallest dog”, and are billed over and over without warning and without a code interpreter ID you can use.
It is also just overwhelmingly awkward. The alternative is to create your own container yourself and pass its ID, which incurs only a single fee — though that fee is charged immediately, regardless of inputs. That container will then expire within 20 minutes of non-use, and any server-side conversation state that references it will completely fail with unrecoverable errors.
If you still want to go at it, here’s some console Python chat code for you. It uses the previous response ID method and non-streaming simply because the alternate is massive event code and custom database, and supports nothing other than code interpreter. I just tacked the tool and support onto other example code I had open.
- A container ID is created but no other management of the lifecycle.
- Any files created that the AI cites for you will automatically be downloaded to a
`code-files` sub-directory.
- There is a function to upload, but no interface for it. You could hard-code a list of files to iterate over and upload to the container after it is created.
- exit, and the response IDs are cleaned from the server (and the container is allowed to expire).
- original container code, because the API reference doesn’t provide SDK methods, and they have been wrong in documentation.
You can ask the AI for something like a file with a list of random numbers, and receive that file as proof of it running the python tool without an ‘unknown_parameter’ error.
import os
import mimetypes
from pathlib import Path
from typing import Final
import httpx
from openai import OpenAI
# ------------------------------------------------------------
# misc helpers – clean up server-side Response objects
# ------------------------------------------------------------
def delete_response_id(resp_id: str | None) -> None:
    """Best-effort deletion of a single server-side Response object.

    A falsy ID (None or "") is ignored.  API errors are deliberately
    swallowed: cleanup must never crash the chat loop on exit.
    """
    if not resp_id:
        return
    try:
        client.responses.delete(resp_id)
    except Exception:
        # The Response may already be gone, or the network may be down;
        # either way there is nothing useful to do about it here.
        pass
def delete_response_ids(ids: list[str | None] | None) -> None:
    """Delete every Response ID in *ids*; tolerates None or an empty list."""
    for rid in ids or []:
        delete_response_id(rid)
# ------------------------------------------------------------
# Code-Interpreter helpers
# ------------------------------------------------------------
# Extension -> MIME type table for files uploaded into a code-interpreter
# container.  Extensions not listed here fall through to
# mimetypes.guess_type() in _guess_mime(), and finally to a generic
# binary fallback.
_SUPPORTED_MIME_MAP: Final[dict[str, str]] = {
    ".c": "text/x-c",
    ".cs": "text/x-csharp",
    ".cpp": "text/x-c++",
    ".csv": "text/csv",
    ".doc": "application/msword",
    ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    ".html": "text/html",
    ".java": "text/x-java",
    ".json": "application/json",
    ".md": "text/markdown",
    ".pdf": "application/pdf",
    ".php": "text/x-php",
    ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
    ".py": "text/x-python",
    ".rb": "text/x-ruby",
    ".tex": "text/x-tex",
    ".txt": "text/plain",
    ".css": "text/css",
    ".js": "text/javascript",
    ".sh": "application/x-sh",
    ".ts": "application/typescript",
    ".jpeg": "image/jpeg",
    ".jpg": "image/jpeg",
    ".gif": "image/gif",
    ".pkl": "application/octet-stream",
    ".png": "image/png",
    ".tar": "application/x-tar",
    ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    ".xml": "application/xml",
    ".zip": "application/zip",
}
def _guess_mime(path: Path) -> str:
    """Return the MIME type to report for *path* when uploading.

    Preference order: the explicit extension table, then the stdlib
    mimetypes guess, then a generic binary fallback.
    """
    known = _SUPPORTED_MIME_MAP.get(path.suffix.lower())
    if known is not None:
        return known
    guessed, _encoding = mimetypes.guess_type(path.name)
    return guessed or "application/octet-stream"
def create_code_container(name: str = "test") -> str:
    """Create a code-interpreter container via the raw REST endpoint.

    The SDK does not expose container management, so this POSTs directly
    to /v1/containers and returns the new container's ID.  Raises
    httpx.HTTPStatusError on any non-2xx response.
    """
    headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY') or ''}"}
    with httpx.Client(timeout=20.0) as http:
        resp = http.post(
            "https://api.openai.com/v1/containers",
            headers=headers,
            json={"name": name},
        )
        resp.raise_for_status()
        return resp.json()["id"]
def post_code_file(container: str, path: str | Path) -> dict:
    """Upload a local file into *container*; return the API's JSON reply.

    Raises FileNotFoundError when the local file is missing, and re-raises
    any httpx.HTTPStatusError after printing the server's error body so
    upload problems are easy to diagnose.
    """
    source = Path(path)
    if not source.is_file():
        raise FileNotFoundError(source)
    headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY') or ''}"}
    with source.open("rb") as fh, httpx.Client(timeout=120.0) as http:
        resp = http.post(
            f"https://api.openai.com/v1/containers/{container}/files",
            headers=headers,
            files={"file": (source.name, fh, _guess_mime(source))},
        )
    try:
        resp.raise_for_status()
    except httpx.HTTPStatusError as e:
        print("Container upload failed:", e)
        if e.response is not None:
            print("Error body:", e.response.text)
        raise
    return resp.json()
# ------------------------------------------------------------
# download helper for AI-generated container files
# ------------------------------------------------------------
_SAVE_DIR = Path("./code-files")
def _ensure_dir(d: Path) -> None:
d.mkdir(parents=True, exist_ok=True)
def _unique_path(base: Path) -> Path:
"""if file exists, append _1, _2 …"""
if not base.exists():
return base
stem, suffix = base.stem, base.suffix
i = 1
while True:
candidate = base.with_name(f"{stem}_{i}{suffix}")
if not candidate.exists():
return candidate
i += 1
def download_container_file(container_id: str, file_id: str, filename: str) -> Path:
    """Stream a container file's bytes into ./code-files/ and return the path.

    The local filename is de-duplicated (name_1, name_2, ...) so repeated
    downloads never overwrite earlier files.  Re-raises HTTP errors after
    printing the server's error body.
    """
    headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY') or ''}"}
    url = (
        f"https://api.openai.com/v1/containers/{container_id}/files/{file_id}/content"
    )
    _ensure_dir(_SAVE_DIR)
    destination = _unique_path(_SAVE_DIR / filename)
    with httpx.Client(timeout=120.0) as http:
        with http.stream("GET", url, headers=headers) as resp:
            try:
                resp.raise_for_status()
            except httpx.HTTPStatusError as e:
                print("Download failed:", e)
                if e.response is not None:
                    print("Error body:", e.response.text)
                raise
            # Stream to disk chunk-by-chunk so large files never sit in RAM.
            with destination.open("wb") as sink:
                for chunk in resp.iter_bytes():
                    sink.write(chunk)
    return destination
# ------------------------------------------------------------
# Responses demo – Code-Interpreter chat
# ------------------------------------------------------------
# System-style guidance for the model.
instructions = (
    "You are ChatCoder, a helpful programming expert. "
    "Use analysis python tool for any numeric calculation or algorithmic verification."
)
# First turn is automated so the chat opens with a model-generated greeting.
user_input = "*automated*: Produce a one-sentence user welcome message to the chatbot"
client = OpenAI()
previous_id = None  # chains turns server-side via previous_response_id
previous_response_ids: list[str | None] = []  # everything deleted on exit
# Create the container explicitly (instead of "auto") so we hold its ID
# and are billed only once for it; no further lifecycle management.
container_id = create_code_container()

while user_input != "exit":
    response = client.responses.create(
        model="o4-mini",
        input=user_input,
        # BUG FIX: `instructions` was built above but never sent, so the
        # model ran without its system guidance.  Instructions are not
        # inherited through previous_response_id, so pass them every turn.
        instructions=instructions,
        include=["code_interpreter_call.outputs"],
        max_output_tokens=16_000,
        previous_response_id=previous_id,
        reasoning={"effort": "low"},
        tools=[{"type": "code_interpreter", "container": container_id}],
        store=True,  # keep server-side state so previous_response_id works
        stream=False,
    )
    previous_response_ids.append(response.id)
    previous_id = response.id

    # --- show text + auto-download cited files -----------------------------
    for idx, item in enumerate(response.output):
        if getattr(item, "type", None) != "message":
            continue  # skip reasoning / tool-call output items
        for element in getattr(item, "content", []):
            text = getattr(element, "text", None)
            if text:
                print(f"\n[Output item {idx}]:\n{text}")
            # look for container_file_citation annotations
            for ann in getattr(element, "annotations", []):
                if getattr(ann, "type", None) == "container_file_citation":
                    fid = getattr(ann, "file_id", None)
                    fname = getattr(ann, "filename", f"{fid}")
                    if fid:
                        local_path = download_container_file(
                            container_id, fid, fname
                        )
                        print(f"[downloaded] {fname} -> {local_path}")
    print("-" * 40)
    user_input = input("\nPrompt (or 'exit' to quit): ").strip()

# tidy Responses artifacts (the container itself is left to expire)
delete_response_ids(previous_response_ids)
In the future, you might give even the slightest hint what API call you are attempting or what programming language you are using.
Thanks a lot for your reply! I’m building a shift planner app in Flutter using OpenAI, but the generated results don’t always follow all the scheduling rules correctly.
I need to validate the generated schedule against the defined rules, and regenerate it if it doesn’t comply.
Do you have any suggestions or best practices for improving this?