Data on completions stream response is cut off in the middle

The OpenAI completions stream response is not in the correct format.
I'm getting chunks that are not valid JSON — they are cut off in the middle.
For example:
part 1:
data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" woods"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" a"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" place"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" of"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" enchant"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},"del

part 2:
ta":{“content”:“ment”},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:“,”},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" where"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" flowers"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

data: {“choices”:[{“content_filter_results”:{“hate”:{“filtered”:false,“severity”:“safe”},“self_harm”:{“filtered”:false,“severity”:“safe”},“sexual”:{“filtered”:false,“severity”:“safe”},“violence”:{“filtered”:false,“severity”:“safe”}},“delta”:{“content”:" gl"},“finish_reason”:null,“index”:0,“logprobs”:null}],“created”:1720374151,“id”:“chatcmpl-9iQC7cUtcFYM4d7H1Cl3iqoDlSlfG”,“model”:“gpt-4”,“object”:“chat.completion.chunk”,“system_fingerprint”:“fp_811936bd4f”}

I'm using a plain Java application, not an OpenAI SDK.
Any idea what could cause this?

Welcome to the community!

Yeah, this has been an issue for a while. This is one way you can deal with it:

  1. buffer = ""
  2. on data: try to parse (buffer + data)
    2.1. if parsing succeeds: buffer = "", process the content
    2.2. if parsing fails: buffer += data, and wait for the next frame.

Technically it’s not a bug, it’s just one of the annoying little things you have to deal with.

1 Like