Getting a constant "Internal Server Error" — how should I resolve this?

I am getting the “Internal Server Error” around 60-70% of the time while using the OpenAI API in my code.

CODE

def summarize_tweets(senti, sheet_name, tweets):
    """Summarize tweets that share one sentiment for a single topic.

    Args:
        senti: Sentiment label (e.g. "Positive", "Negative", "Neutral") interpolated
            into the prompt.
        sheet_name: Topic name interpolated into the prompt.
        tweets: Iterable of tweets; each element is stringified and joined by newlines.

    Returns:
        The ChatCompletion response object from the OpenAI API.
    """
    tweets_text = "\n".join(str(tweet) for tweet in tweets)

    # BUG FIX: tiktoken.get_encoding() expects an *encoding* name such as
    # "o200k_base", NOT a model name — get_encoding("gpt-4o-mini") raises
    # ValueError. encoding_for_model() resolves the model to its encoding.
    encoding = tiktoken.encoding_for_model("gpt-4o-mini")
    token_count = len(encoding.encode(tweets_text))
    print(f"Token count: {token_count}")

    # "Internal Server Error" is a transient HTTP 5xx. The OpenAI SDK already
    # retries 5xx with exponential backoff; raise its retry budget (default 2)
    # and allow a longer timeout, since large 100-tweet prompts can be slow.
    # with_options() uses only the client already in scope — no new imports.
    msg = openai_client.with_options(max_retries=5, timeout=120.0).chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {
                "role": "user",
                "content": f"Summarize tweets from the ones mentioned below having a {senti} sentiment related to the topic '{sheet_name}':\n\n{tweets_text}",
            }
        ],
    )
    # BUG FIX: the original never returned the result, yet the traceback shows
    # the caller assigning it (positive_summary = summarize_tweets(...)).
    return msg

I have a list of tweet topics and for each tweet topic I have 3 sentiments (“Positive”, “Negative”, “Neutral”) so for each topic I call the API 3 times and pass only 100 tweets in each call.

So, for example:
List of topics: [“eco-friendly”, “nature”, “waste”]
Each topic has 300 tweets: 100 positive, 100 negative and 100 neutral.

ERROR

openai.InternalServerError: Internal Server Error
Traceback:
File "/Users/.venv/lib/python3.13/site-packages/streamlit/runtime/scriptrunner/exec_code.py", line 88, in exec_func_with_error_handling
    result = func()
File "/Users/.venv/lib/python3.13/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 579, in code_to_exec
    exec(code, module.__dict__)
    ~~~~^^^^^^^^^^^^^^^^^^^^^^^
File "/Users//s7a_add_all_feature_working_code.py", line 421, in <module>
    main()
    ~~~~^^
File "/Users/s7a_add_all_feature_working_code.py", line 376, in main
    positive_summary = summarize_tweets(senti, sheet_name, random_positive_tweets)
File "/Users/s7a_add_all_feature_working_code.py", line 345, in summarize_tweets
    msg = openai_client.chat.completions.create(
        model="gpt-4o-mini",
    ...<5 lines>...
        ],
    )
File "/Users/.venv/lib/python3.13/site-packages/openai/_utils/_utils.py", line 279, in wrapper
    return func(*args, **kwargs)
File "/Users/.venv/lib/python3.13/site-packages/openai/resources/chat/completions.py", line 859, in create
    return self._post(
           ~~~~~~~~~~^
        "/chat/completions",
        ^^^^^^^^^^^^^^^^^^^^
    ...<40 lines>...
        stream_cls=Stream[ChatCompletionChunk],
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
File "/Users/.venv/lib/python3.13/site-packages/openai/_base_client.py", line 1283, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
                           ~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/.venv/lib/python3.13/site-packages/openai/_base_client.py", line 960, in request
    return self._request(
           ~~~~~~~~~~~~~^
        cast_to=cast_to,
        ^^^^^^^^^^^^^^^^
    ...<3 lines>...
        retries_taken=retries_taken,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
File "/Users/.venv/lib/python3.13/site-packages/openai/_base_client.py", line 1049, in _request
    return self._retry_request(
           ~~~~~~~~~~~~~~~~~~~^
        input_options,
        ^^^^^^^^^^^^^^
    ...<4 lines>...
        stream_cls=stream_cls,
        ^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
File "/Users/.venv/lib/python3.13/site-packages/openai/_base_client.py", line 1098, in _retry_request
    return self._request(
           ~~~~~~~~~~~~~^
        options=options,
        ^^^^^^^^^^^^^^^^
    ...<3 lines>...
        stream_cls=stream_cls,
        ^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
File "/Users/.venv/lib/python3.13/site-packages/openai/_base_client.py", line 1049, in _request
    return self._retry_request(
           ~~~~~~~~~~~~~~~~~~~^
        input_options,
        ^^^^^^^^^^^^^^
    ...<4 lines>...
        stream_cls=stream_cls,
        ^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
File "/Users/.venv/lib/python3.13/site-packages/openai/_base_client.py", line 1098, in _retry_request
    return self._request(
           ~~~~~~~~~~~~~^
        options=options,
        ^^^^^^^^^^^^^^^^
    ...<3 lines>...
        stream_cls=stream_cls,
        ^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
File "/Users/.venv/lib/python3.13/site-packages/openai/_base_client.py", line 1064, in _request
    raise self._make_status_error_from_response(err.response) from None

How should I resolve this?

1 Like