Continuous "server overloaded" error when attempting to Google via Auto-GPT, on a paid account

Here’s where it breaks:

File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\__main__.py", line 5, in <module>
    autogpt.cli.main()
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\click\core.py", line 1130, in __call__
    return self.main(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\click\core.py", line 1055, in main
    rv = self.invoke(ctx)
         ^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\click\core.py", line 1635, in invoke
    rv = super().invoke(ctx)
         ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\click\core.py", line 1404, in invoke
    return ctx.invoke(self.callback, **ctx.params)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\click\core.py", line 760, in invoke
    return __callback(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\click\decorators.py", line 26, in new_func
    return f(get_current_context(), *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\cli.py", line 96, in main
    run_auto_gpt(
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\main.py", line 197, in run_auto_gpt
    agent.start_interaction_loop()
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\agent\agent.py", line 130, in start_interaction_loop
    assistant_reply = chat_with_ai(
                      ^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\llm\chat.py", line 112, in chat_with_ai
    new_summary_message, trimmed_messages = agent.history.trim_messages(
                                            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\memory\message_history.py", line 79, in trim_messages
    new_summary_message = self.update_running_summary(
                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\memory\message_history.py", line 194, in update_running_summary
    self.summary = create_chat_completion(prompt)
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\llm\utils\__init__.py", line 53, in metered_func
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\llm\utils\__init__.py", line 87, in _wrapped
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\llm\utils\__init__.py", line 235, in create_chat_completion
    response = api_manager.create_chat_completion(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\Desktop\Auto-GPT-0.4.0\autogpt\llm\api_manager.py", line 61, in create_chat_completion
    response = openai.ChatCompletion.create(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\api_resources\chat_completion.py", line 25, in create
    return super().create(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\api_resources\abstract\engine_api_resource.py", line 153, in create
    response, _, api_key = requestor.request(
                           ^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\api_requestor.py", line 226, in request
    resp, got_stream = self._interpret_response(result, stream)
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\api_requestor.py", line 619, in _interpret_response
    self._interpret_response_line(
  File "C:\Users\think\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\api_requestor.py", line 662, in _interpret_response_line
    raise error.ServiceUnavailableError(
openai.error.ServiceUnavailableError: The server is overloaded or not ready yet.

Any ideas are welcome!
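In the meantime, the only workaround I've found is wrapping the call in retries with exponential backoff. This is just a rough sketch against the pre-1.0 `openai` SDK that Auto-GPT 0.4.x uses; the helper name `chat_with_backoff` is my own, not something that exists in Auto-GPT:

```python
# Hypothetical workaround sketch (not part of Auto-GPT): retry the chat call
# with exponential backoff when the API reports it is overloaded.
import time

import openai


def chat_with_backoff(messages, model="gpt-3.5-turbo", max_retries=5):
    """Call openai.ChatCompletion.create, retrying on transient server errors."""
    delay = 1.0
    for attempt in range(max_retries):
        try:
            return openai.ChatCompletion.create(model=model, messages=messages)
        except (openai.error.ServiceUnavailableError, openai.error.RateLimitError):
            if attempt == max_retries - 1:
                raise  # give up after the last attempt
            time.sleep(delay)
            delay *= 2  # exponential backoff: 1s, 2s, 4s, ...


# Example usage:
# reply = chat_with_backoff([{"role": "user", "content": "Hello"}])
```

It only papers over the problem, but at least the agent no longer dies on the first overloaded response.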

Same problem here:
  File "D:\Program Files\Python\Python310\lib\runpy.py", line 196, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "D:\Program Files\Python\Python310\lib\runpy.py", line 86, in _run_code
    exec(code, run_globals)
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\__main__.py", line 5, in <module>
    autogpt.cli.main()
  File "D:\Program Files\Python\Python310\lib\site-packages\click\core.py", line 1130, in __call__
    return self.main(*args, **kwargs)
  File "D:\Program Files\Python\Python310\lib\site-packages\click\core.py", line 1055, in main
    rv = self.invoke(ctx)
  File "D:\Program Files\Python\Python310\lib\site-packages\click\core.py", line 1635, in invoke
    rv = super().invoke(ctx)
  File "D:\Program Files\Python\Python310\lib\site-packages\click\core.py", line 1404, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "D:\Program Files\Python\Python310\lib\site-packages\click\core.py", line 760, in invoke
    return __callback(*args, **kwargs)
  File "D:\Program Files\Python\Python310\lib\site-packages\click\decorators.py", line 26, in new_func
    return f(get_current_context(), *args, **kwargs)
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\cli.py", line 96, in main
    run_auto_gpt(
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\main.py", line 197, in run_auto_gpt
    agent.start_interaction_loop()
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\agent\agent.py", line 130, in start_interaction_loop
    assistant_reply = chat_with_ai(
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\llm\chat.py", line 193, in chat_with_ai
    assistant_reply = create_chat_completion(
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\llm\utils\__init__.py", line 53, in metered_func
    return func(*args, **kwargs)
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\llm\utils\__init__.py", line 87, in _wrapped
    return func(*args, **kwargs)
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\llm\utils\__init__.py", line 235, in create_chat_completion
    response = api_manager.create_chat_completion(
  File "C:\Users\Captain\Desktop\Auto-GPT-stable\Auto-GPT-stable\autogpt\llm\api_manager.py", line 61, in create_chat_completion
    response = openai.ChatCompletion.create(
  File "D:\Program Files\Python\Python310\lib\site-packages\openai\api_resources\chat_completion.py", line 25, in create
    return super().create(*args, **kwargs)
  File "D:\Program Files\Python\Python310\lib\site-packages\openai\api_resources\abstract\engine_api_resource.py", line 153, in create
    response, _, api_key = requestor.request(
  File "D:\Program Files\Python\Python310\lib\site-packages\openai\api_requestor.py", line 226, in request
    resp, got_stream = self._interpret_response(result, stream)
  File "D:\Program Files\Python\Python310\lib\site-packages\openai\api_requestor.py", line 619, in _interpret_response
    self._interpret_response_line(
  File "D:\Program Files\Python\Python310\lib\site-packages\openai\api_requestor.py", line 662, in _interpret_response_line
    raise error.ServiceUnavailableError(
openai.error.ServiceUnavailableError: The server is overloaded or not ready yet.

No idea how to solve it, even though I am using a paid account.

I checked 0.4.2, where this is supposed to be fixed, but it is still not working for me.


OpenAI seems to be deprioritizing JSON in the user message now. I had issues all day long until I moved to pipes, roughly as sketched below.
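To illustrate what I mean (simplified, not the exact Auto-GPT prompt, and with made-up field names): instead of embedding a JSON blob in the user message, I format the same fields as pipe-delimited text:

```python
# Simplified illustration of "JSON vs. pipes" in the user message;
# the field names here are invented, not taken from Auto-GPT's prompt.
import json

fields = {"command": "google", "query": "latest AI news", "reason": "research"}

json_style = json.dumps(fields)
# -> '{"command": "google", "query": "latest AI news", "reason": "research"}'

pipe_style = " | ".join(f"{key}: {value}" for key, value in fields.items())
# -> 'command: google | query: latest AI news | reason: research'
```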


Same question here. Have you solved the problem?

Hi Cytranic, I'm new to this terminology. Can you please share a bit more info, since we are using JSON in our prompts?