GPT-5 API consumes credits and does not return a response

Hi all,

I was running a batch of API calls to GPT-5. I started the run yesterday morning, and it worked perfectly until yesterday afternoon. Then the 67th call suddenly started failing with a “RemoteProtocolError: Server disconnected without sending a response.” error. The worst part is that I have already lost $10 in credits, so the requests are apparently being processed on the server side, but I never receive the response. Even the same payload that worked before has stopped working in the last 16 hours.

I tried to change the default 10-minute timeout like this:

from openai import OpenAI
client = OpenAI(timeout=2400)
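
Since a single global timeout did not help, I am now experimenting with per-phase timeouts plus the SDK's built-in retries turned up. A sketch of what I mean (the connect/read/write split is my guess at what matters for mid-request disconnects):

import httpx
from openai import OpenAI

# Per-phase timeouts instead of one global value; the read timeout is the
# one that applies while waiting for the model to finish generating.
client = OpenAI(
    timeout=httpx.Timeout(2400.0, connect=10.0, read=2400.0, write=60.0),
    max_retries=5,  # the SDK retries connection errors with backoff
)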

And here is my code, which asks the LLM to analyze a GitHub diff in a specific way:

for i, row in tqdm(tp_df_test.iterrows()):
    if i >= len(responses):  # skip rows already processed in a previous run
        diff = row['diff']
        prompt = prompt_preffix + "\n<diff>\n" + diff + "\n</diff>"
        prompt = sanitize_text(prompt)
        response = client.responses.create(model="gpt-5", input=prompt)  # medium reasoning
        print(response.output_text)
        responses.append(response.output_text)
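
Since every failed call still burns credits, I am also considering wrapping the call in a manual retry that only catches connection-level failures (openai.APIConnectionError is what the traceback below ultimately raises). A rough sketch:

import time
import openai

def create_with_retry(prompt, max_attempts=3):
    # Retry only on connection-level failures; any other error should surface.
    for attempt in range(max_attempts):
        try:
            return client.responses.create(model="gpt-5", input=prompt)
        except openai.APIConnectionError:
            if attempt == max_attempts - 1:
                raise
            time.sleep(10 * 2 ** attempt)  # back off: 10 s, 20 s, 40 s, ...

That would at least bound how much a transient disconnect costs, though it does not explain why the same payload now fails every time.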

I tried the same prompt in ChatGPT, and it worked on the second attempt. But as I said, the code above suddenly stopped working yesterday.

I received the following stack trace:

---------------------------------------------------------------------------
RemoteProtocolError                       Traceback (most recent call last)
File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_transports\default.py:101, in map_httpcore_exceptions()
    100 try:
--> 101     yield
    102 except Exception as exc:

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_transports\default.py:250, in HTTPTransport.handle_request(self, request)
    249 with map_httpcore_exceptions():
--> 250     resp = self._pool.handle_request(req)
    252 assert isinstance(resp.stream, typing.Iterable)

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpcore\_sync\connection_pool.py:256, in ConnectionPool.handle_request(self, request)
    255     self._close_connections(closing)
--> 256     raise exc from None
    258 # Return the response. Note that in this case we still have to manage
    259 # the point at which the response is closed.

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpcore\_sync\connection_pool.py:236, in ConnectionPool.handle_request(self, request)
    234 try:
    235     # Send the request on the assigned connection.
--> 236     response = connection.handle_request(
    237         pool_request.request
    238     )
    239 except ConnectionNotAvailable:
    240     # In some cases a connection may initially be available to
    241     # handle a request, but then become unavailable.
    242     #
    243     # In this case we clear the connection and try again.

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpcore\_sync\connection.py:103, in HTTPConnection.handle_request(self, request)
    101     raise exc
--> 103 return self._connection.handle_request(request)

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpcore\_sync\http11.py:136, in HTTP11Connection.handle_request(self, request)
    135         self._response_closed()
--> 136 raise exc

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpcore\_sync\http11.py:106, in HTTP11Connection.handle_request(self, request)
     97 with Trace(
     98     "receive_response_headers", logger, request, kwargs
     99 ) as trace:
    100     (
    101         http_version,
    102         status,
    103         reason_phrase,
    104         headers,
    105         trailing_data,
--> 106     ) = self._receive_response_headers(**kwargs)
    107     trace.return_value = (
    108         http_version,
    109         status,
    110         reason_phrase,
    111         headers,
    112     )

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpcore\_sync\http11.py:177, in HTTP11Connection._receive_response_headers(self, request)
    176 while True:
--> 177     event = self._receive_event(timeout=timeout)
    178     if isinstance(event, h11.Response):

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpcore\_sync\http11.py:231, in HTTP11Connection._receive_event(self, timeout)
    230     msg = "Server disconnected without sending a response."
--> 231     raise RemoteProtocolError(msg)
    233 self._h11_state.receive_data(data)

RemoteProtocolError: Server disconnected without sending a response.

The above exception was the direct cause of the following exception:

RemoteProtocolError                       Traceback (most recent call last)
File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\openai\_base_client.py:982, in SyncAPIClient.request(self, cast_to, options, stream, stream_cls)
    981 try:
--> 982     response = self._client.send(
    983         request,
    984         stream=stream or self._should_stream_response_body(request=request),
    985         **kwargs,
    986     )
    987 except httpx.TimeoutException as err:

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_client.py:914, in Client.send(self, request, stream, auth, follow_redirects)
    912 auth = self._build_request_auth(request, auth)
--> 914 response = self._send_handling_auth(
    915     request,
    916     auth=auth,
    917     follow_redirects=follow_redirects,
    918     history=[],
    919 )
    920 try:

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_client.py:942, in Client._send_handling_auth(self, request, auth, follow_redirects, history)
    941 while True:
--> 942     response = self._send_handling_redirects(
    943         request,
    944         follow_redirects=follow_redirects,
    945         history=history,
    946     )
    947     try:

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_client.py:979, in Client._send_handling_redirects(self, request, follow_redirects, history)
    977     hook(request)
--> 979 response = self._send_single_request(request)
    980 try:

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_client.py:1014, in Client._send_single_request(self, request)
   1013 with request_context(request=request):
-> 1014     response = transport.handle_request(request)
   1016 assert isinstance(response.stream, SyncByteStream)

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_transports\default.py:249, in HTTPTransport.handle_request(self, request)
    237 req = httpcore.Request(
    238     method=request.method,
    239     url=httpcore.URL(
   (...)    247     extensions=request.extensions,
    248 )
--> 249 with map_httpcore_exceptions():
    250     resp = self._pool.handle_request(req)

File ~\anaconda3\Lib\contextlib.py:158, in _GeneratorContextManager.__exit__(self, typ, value, traceback)
    157 try:
--> 158     self.gen.throw(value)
    159 except StopIteration as exc:
    160     # Suppress StopIteration *unless* it's the same exception that
    161     # was passed to throw().  This prevents a StopIteration
    162     # raised inside the "with" statement from being suppressed.

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\httpx\_transports\default.py:118, in map_httpcore_exceptions()
    117 message = str(exc)
--> 118 raise mapped_exc(message) from exc

RemoteProtocolError: Server disconnected without sending a response.

The above exception was the direct cause of the following exception:

APIConnectionError                        Traceback (most recent call last)
Cell In[32], line 11
      9 prompt = prompt_preffix + "\n<diff>\n" + diff + "\n</diff>"
     10 prompt = sanitize_text(prompt)
---> 11 response = client.responses.create(model="gpt-5", input=prompt) #medium reasoning
     12 print(response.output_text)
     13 responses.append(response.output_text)

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\openai\resources\responses\responses.py:840, in Responses.create(self, background, conversation, include, input, instructions, max_output_tokens, max_tool_calls, metadata, model, parallel_tool_calls, previous_response_id, prompt, prompt_cache_key, reasoning, safety_identifier, service_tier, store, stream, stream_options, temperature, text, tool_choice, tools, top_logprobs, top_p, truncation, user, extra_headers, extra_query, extra_body, timeout)
    803 def create(
    804     self,
    805     *,
   (...)    838     timeout: float | httpx.Timeout | None | NotGiven = not_given,
    839 ) -> Response | Stream[ResponseStreamEvent]:
--> 840     return self._post(
    841         "/responses",
    842         body=maybe_transform(
    843             {
    844                 "background": background,
    845                 "conversation": conversation,
    846                 "include": include,
    847                 "input": input,
    848                 "instructions": instructions,
    849                 "max_output_tokens": max_output_tokens,
    850                 "max_tool_calls": max_tool_calls,
    851                 "metadata": metadata,
    852                 "model": model,
    853                 "parallel_tool_calls": parallel_tool_calls,
    854                 "previous_response_id": previous_response_id,
    855                 "prompt": prompt,
    856                 "prompt_cache_key": prompt_cache_key,
    857                 "reasoning": reasoning,
    858                 "safety_identifier": safety_identifier,
    859                 "service_tier": service_tier,
    860                 "store": store,
    861                 "stream": stream,
    862                 "stream_options": stream_options,
    863                 "temperature": temperature,
    864                 "text": text,
    865                 "tool_choice": tool_choice,
    866                 "tools": tools,
    867                 "top_logprobs": top_logprobs,
    868                 "top_p": top_p,
    869                 "truncation": truncation,
    870                 "user": user,
    871             },
    872             response_create_params.ResponseCreateParamsStreaming
    873             if stream
    874             else response_create_params.ResponseCreateParamsNonStreaming,
    875         ),
    876         options=make_request_options(
    877             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    878         ),
    879         cast_to=Response,
    880         stream=stream or False,
    881         stream_cls=Stream[ResponseStreamEvent],
    882     )

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\openai\_base_client.py:1259, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
   1245 def post(
   1246     self,
   1247     path: str,
   (...)   1254     stream_cls: type[_StreamT] | None = None,
   1255 ) -> ResponseT | _StreamT:
   1256     opts = FinalRequestOptions.construct(
   1257         method="post", url=path, json_data=body, files=to_httpx_files(files), **options
   1258     )
-> 1259     return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File ~\Documents\TCU\Setid\NIA\experimentos\notebooks\Normas\.venv\Lib\site-packages\openai\_base_client.py:1014, in SyncAPIClient.request(self, cast_to, options, stream, stream_cls)
   1011         continue
   1013     log.debug("Raising connection error")
-> 1014     raise APIConnectionError(request=request) from err
   1016 log.debug(
   1017     'HTTP Response: %s %s "%i %s" %s',
   1018     request.method,
   (...)   1022     response.headers,
   1023 )
   1024 log.debug("request_id: %s", response.headers.get("x-request-id"))

APIConnectionError: Connection error.

Does anyone have any idea how to deal with this problem? One thing I am wondering about is whether the Responses API's background mode would sidestep the issue, as in the sketch below.
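
Since the disconnect seems to happen while the connection is held open during a long reasoning run, my untested idea is to submit the request in background mode and poll for the result instead (assuming background mode is available for gpt-5 on my account):

import time

# Submit without holding the HTTP connection open for the whole run,
# then poll until the response is ready.
resp = client.responses.create(model="gpt-5", input=prompt, background=True)
while resp.status in ("queued", "in_progress"):
    time.sleep(10)
    resp = client.responses.retrieve(resp.id)
print(resp.output_text)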

Thanks in advance.