Although this thread is likely to scroll and disappear like they all do, I typed up a short “code as edits-API documentation” Python script — specifically for the OpenAI library module, which is somewhat superfluous here since it doesn’t add much over a raw HTTP call.
It demonstrates multiple input images as vision input, collecting the multiple outputs you can request, and BytesIO in-memory objects both before and after the actual API call; hopefully the code is right-sized to give a working understanding of the parameters without needing to look elsewhere.
import base64
from io import BytesIO
from pathlib import Path
import openai
# ---- User-editable request knobs, referenced by the API call below ----
edit_prompt = """Outfill: complete the rest of the top of the image."""
infile_list = ["input_file.png", ] # list[str] - file paths; the edits endpoint accepts up to 16 images
outfile_format = "png" # "png", "jpeg", or "webp" - for jpeg/webp also consider output_compression
# ---- Load files into memory as BytesIO (application state example) ----
# FileTypes accepted as input: raw file bytes, an io.IOBase instance, a
# PathLike, or a (filename, contents) tuple.
# (Indentation restored: the pasted original was flat and would not parse.)
input_file_objects: list[BytesIO] = []
for path_str in infile_list:
    path = Path(path_str)
    # Path.read_bytes() opens, reads, and closes in one call.
    bio = BytesIO(path.read_bytes())
    # Attach a filename so the SDK's multipart encoder gets name/type metadata.
    bio.name = path.name
    input_file_objects.append(bio)
print(f"Sending image edit request using {infile_list}")
client = openai.OpenAI()

# Collect the request as a plain dict first so the whole parameter table reads
# in one place, then splat it into the SDK call. Types noted per openai-python.
edit_kwargs = dict(
    model="gpt-image-1.5",        # Union[str, ImageModel, None] -- model id; default dall-e-2
    prompt=edit_prompt,           # str - required prompt text
    image=input_file_objects,     # Union[FileTypes, SequenceNotStr[FileTypes]] - single file or list
    input_fidelity="low",         # Optional[Literal["high", "low"]] - gpt-image-1.5 forces "high"
    # mask=maskfile               # FileTypes | Omit - image mask file using transparency alpha
    n=1,                          # Optional[int] | Omit - number of images (1–10); None or omit uses default 1
    size="1024x1024",             # Optional[Literal["256x256","512x512","1024x1024","1536x1024","1024x1536","auto"]]
    quality="low",                # Optional[Literal["standard","low","medium","high","auto"]]
    output_format=outfile_format, # Optional[Literal["png","jpeg","webp"]] | Omit - encoded output format
    # output_compression=95,      # Optional[int] | Omit - compression level for jpeg/webp only (0–100)
    background="opaque",          # Optional[Literal["transparent","opaque","auto"]] | Omit - jpeg unsupported
    stream=False,                 # Optional[Literal[False]] | Literal[True] | Omit - enable streaming events
    partial_images=0,             # Optional[int] | Omit - streaming-only progressive images count
    user="myCustomer",            # str | Omit - end-user identifier for tracking/abuse signals
    # response_format= xxx        # ONLY dall-e-2 -- Optional[Literal["url", "b64_json"]] | Omit
)
response = client.images.edit(**edit_kwargs)
# note: Omit is the OpenAI library sentinel for "parameter not sent at all"
# (distinct from None, which is sent as JSON null).
# (Indentation restored: the pasted original was flat and would not parse.)

# Decode every returned image into raw bytes. b64_json is Optional on the
# Image model — url-style responses (dall-e-2 default) leave it None.
output_images_in_memory: list[bytes] = []
if response.data:
    for img in response.data:
        if img.b64_json:
            output_images_in_memory.append(base64.b64decode(img.b64_json))
print(f"Received {len(output_images_in_memory)} edited image(s)")

# Save each decoded image next to a name derived from the input stems.
# Ceiling division distributes outputs across inputs; the original floor
# division silently dropped the remainder images whenever the output count
# was not an exact multiple of the input count (identical behavior otherwise).
save_index = 0
if infile_list and output_images_in_memory:
    per_input = -(-len(output_images_in_memory) // len(infile_list))  # ceil
    for original_path in infile_list:
        original_stem = Path(original_path).stem
        for _ in range(per_input):
            if save_index >= len(output_images_in_memory):
                break
            outfile_name = f"{original_stem}_edit{save_index}.{outfile_format}"
            with open(outfile_name, "wb") as f:
                f.write(output_images_in_memory[save_index])
            print(f"Saved {outfile_name}")
            save_index += 1

# usage is Optional on ImagesResponse — guard instead of crashing on None.
if response.usage:
    print(response.usage.model_dump())
# Reference dump (pydantic v2 FieldInfo reprs) of the SDK's response models,
# kept as an inert string for documentation only — nothing in it executes.
# Note that `data` and `usage` are Optional, so guard before dereferencing.
response__model_fields = """
ImagesResponse:
{
'created': FieldInfo(annotation=int, required=True),
'background': FieldInfo(annotation=Union[Literal['transparent', 'opaque'], NoneType], required=False, default=None),
'data': FieldInfo(annotation=Union[List[Image], NoneType], required=False, default=None),
'output_format': FieldInfo(annotation=Union[Literal['png', 'webp', 'jpeg'], NoneType], required=False, default=None),
'quality': FieldInfo(annotation=Union[Literal['low', 'medium', 'high'], NoneType], required=False, default=None),
'size': FieldInfo(annotation=Union[Literal['1024x1024', '1024x1536', '1536x1024'], NoneType], required=False, default=None),
'usage': FieldInfo(annotation=Union[Usage, NoneType], required=False, default=None)
}
Image:
{
'b64_json': FieldInfo(annotation=Union[str, NoneType], required=False, default=None),
'revised_prompt': FieldInfo(annotation=Union[str, NoneType], required=False, default=None),
'url': FieldInfo(annotation=Union[str, NoneType], required=False, default=None)
}
"""
A notebook might communicate this better, but that is not an option on this forum.