My bill was overdue and I paid it. The billing GUI says it's paid, but any new API keys I create don't work:

{
	"name": "AuthenticationError",
	"message": "Error code: 401 - {'error': {'message': 'Incorrect API key provided: YOUR_OPE*******_KEY. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}",
	"stack": "---------------------------------------------------------------------------
AuthenticationError                       Traceback (most recent call last)
Cell In[3], line 24
     18 smart_scraper_graph = SmartScraperGraph(
     19     prompt=my_prompt,
     20     source=source_url,
     21     config=graph_config
     22 )
     23 # Run the pipeline
---> 24 actionsList = smart_scraper_graph.run()
     25 # Save the result to a JSON file
     26 timestamp = time.strftime(\"%Y%m%d%H%M%S\")

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/scrapegraphai/graphs/smart_scraper_graph.py:114, in SmartScraperGraph.run(self)
    106 \"\"\"
    107 Executes the scraping process and returns the answer to the prompt.
    108 
    109 Returns:
    110     str: The answer to the prompt.
    111 \"\"\"
    113 inputs = {\"user_prompt\": self.prompt, self.input_key: self.source}
--> 114 self.final_state, self.execution_info = self.graph.execute(inputs)
    116 return self.final_state.get(\"answer\", \"No answer found.\")

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/scrapegraphai/graphs/base_graph.py:263, in BaseGraph.execute(self, initial_state)
    261     return (result[\"_state\"], [])
    262 else:
--> 263     return self._execute_standard(initial_state)

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/scrapegraphai/graphs/base_graph.py:185, in BaseGraph._execute_standard(self, initial_state)
    172     graph_execution_time = time.time() - start_time
    173     log_graph_execution(
    174         graph_name=self.graph_name,
    175         source=source,
   (...)
    183         exception=str(e)
    184     )
--> 185     raise e
    186 node_exec_time = time.time() - curr_time
    187 total_exec_time += node_exec_time

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/scrapegraphai/graphs/base_graph.py:169, in BaseGraph._execute_standard(self, initial_state)
    167 with get_openai_callback() as cb:
    168     try:
--> 169         result = current_node.execute(state)
    170     except Exception as e:
    171         error_node = current_node.node_name

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/scrapegraphai/nodes/generate_answer_node.py:129, in GenerateAnswerNode.execute(self, state)
    123 prompt = PromptTemplate(
    124     template=template_no_chunks_prompt ,
    125     input_variables=[\"question\"],
    126     partial_variables={\"context\": doc,
    127                         \"format_instructions\": format_instructions})
    128 chain =  prompt | self.llm_model | output_parser
--> 129 answer = chain.invoke({\"question\": user_prompt})
    131 state.update({self.output[0]: answer})
    132 return state

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/langchain_core/runnables/base.py:2878, in RunnableSequence.invoke(self, input, config, **kwargs)
   2876             input = context.run(step.invoke, input, config, **kwargs)
   2877         else:
-> 2878             input = context.run(step.invoke, input, config)
   2879 # finish the root run
   2880 except BaseException as e:

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:276, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    265 def invoke(
    266     self,
    267     input: LanguageModelInput,
   (...)
    271     **kwargs: Any,
    272 ) -> BaseMessage:
    273     config = ensure_config(config)
    274     return cast(
    275         ChatGeneration,
--> 276         self.generate_prompt(
    277             [self._convert_input(input)],
    278             stop=stop,
    279             callbacks=config.get(\"callbacks\"),
    280             tags=config.get(\"tags\"),
    281             metadata=config.get(\"metadata\"),
    282             run_name=config.get(\"run_name\"),
    283             run_id=config.pop(\"run_id\", None),
    284             **kwargs,
    285         ).generations[0][0],
    286     ).message

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:776, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    768 def generate_prompt(
    769     self,
    770     prompts: List[PromptValue],
   (...)
    773     **kwargs: Any,
    774 ) -> LLMResult:
    775     prompt_messages = [p.to_messages() for p in prompts]
--> 776     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:633, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    631         if run_managers:
    632             run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 633         raise e
    634 flattened_outputs = [
    635     LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
    636     for res in results
    637 ]
    638 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:623, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    620 for i, m in enumerate(messages):
    621     try:
    622         results.append(
--> 623             self._generate_with_cache(
    624                 m,
    625                 stop=stop,
    626                 run_manager=run_managers[i] if run_managers else None,
    627                 **kwargs,
    628             )
    629         )
    630     except BaseException as e:
    631         if run_managers:

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py:845, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    843 else:
    844     if inspect.signature(self._generate).parameters.get(\"run_manager\"):
--> 845         result = self._generate(
    846             messages, stop=stop, run_manager=run_manager, **kwargs
    847         )
    848     else:
    849         result = self._generate(messages, stop=stop, **kwargs)

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/langchain_openai/chat_models/base.py:635, in BaseChatOpenAI._generate(self, messages, stop, run_manager, **kwargs)
    633     generation_info = {\"headers\": dict(raw_response.headers)}
    634 else:
--> 635     response = self.client.create(**payload)
    636 return self._create_chat_result(response, generation_info)

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/openai/_utils/_utils.py:274, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
    272             msg = f\"Missing required argument: {quote(missing[0])}\"
    273     raise TypeError(msg)
--> 274 return func(*args, **kwargs)

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/openai/resources/chat/completions.py:668, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, parallel_tool_calls, presence_penalty, response_format, seed, service_tier, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
    633 @required_args([\"messages\", \"model\"], [\"messages\", \"model\", \"stream\"])
    634 def create(
    635     self,
   (...)
    665     timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    666 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
    667     validate_response_format(response_format)
--> 668     return self._post(
    669         \"/chat/completions\",
    670         body=maybe_transform(
    671             {
    672                 \"messages\": messages,
    673                 \"model\": model,
    674                 \"frequency_penalty\": frequency_penalty,
    675                 \"function_call\": function_call,
    676                 \"functions\": functions,
    677                 \"logit_bias\": logit_bias,
    678                 \"logprobs\": logprobs,
    679                 \"max_tokens\": max_tokens,
    680                 \"n\": n,
    681                 \"parallel_tool_calls\": parallel_tool_calls,
    682                 \"presence_penalty\": presence_penalty,
    683                 \"response_format\": response_format,
    684                 \"seed\": seed,
    685                 \"service_tier\": service_tier,
    686                 \"stop\": stop,
    687                 \"stream\": stream,
    688                 \"stream_options\": stream_options,
    689                 \"temperature\": temperature,
    690                 \"tool_choice\": tool_choice,
    691                 \"tools\": tools,
    692                 \"top_logprobs\": top_logprobs,
    693                 \"top_p\": top_p,
    694                 \"user\": user,
    695             },
    696             completion_create_params.CompletionCreateParams,
    697         ),
    698         options=make_request_options(
    699             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    700         ),
    701         cast_to=ChatCompletion,
    702         stream=stream or False,
    703         stream_cls=Stream[ChatCompletionChunk],
    704     )

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/openai/_base_client.py:1260, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
   1246 def post(
   1247     self,
   1248     path: str,
   (...)
   1255     stream_cls: type[_StreamT] | None = None,
   1256 ) -> ResponseT | _StreamT:
   1257     opts = FinalRequestOptions.construct(
   1258         method=\"post\", url=path, json_data=body, files=to_httpx_files(files), **options
   1259     )
-> 1260     return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/openai/_base_client.py:937, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
    928 def request(
    929     self,
    930     cast_to: Type[ResponseT],
   (...)
    935     stream_cls: type[_StreamT] | None = None,
    936 ) -> ResponseT | _StreamT:
--> 937     return self._request(
    938         cast_to=cast_to,
    939         options=options,
    940         stream=stream,
    941         stream_cls=stream_cls,
    942         remaining_retries=remaining_retries,
    943     )

File ~/miniconda3/envs/scrapegraph-ai/lib/python3.9/site-packages/openai/_base_client.py:1041, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1038         err.response.read()
   1040     log.debug(\"Re-raising status error\")
-> 1041     raise self._make_status_error_from_response(err.response) from None
   1043 return self._process_response(
   1044     cast_to=cast_to,
   1045     options=options,
   (...)
   1049     retries_taken=options.get_max_retries(self.max_retries) - retries,
   1050 )

AuthenticationError: Error code: 401 - {'error': {'message': 'Incorrect API key provided: YOUR_OPE*******_KEY. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}"
}

I paid my overdue bill and recharged my account, so what's wrong? I'm trying to use ScrapeGraphAI with my OpenAI key.
The code is similar to the example here: GitHub - ScrapeGraphAI/Scrapegraph-ai: Python scraper based on AI
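The masked value in the 401 message (YOUR_OPE*******_KEY) may just be how the key got redacted for this post, but it is also what the literal README placeholder would look like, so it's worth confirming the real key actually reaches graph_config. Below is a minimal sketch (not my exact code), assuming the "llm" / "api_key" config layout from the repo's example and that the real key is exported as OPENAI_API_KEY; the model name is just illustrative:

```python
# Minimal sketch: how the key is normally passed to SmartScraperGraph.
# Assumes the "llm"/"api_key" config layout from the ScrapeGraphAI README and that the
# real key is exported as OPENAI_API_KEY; "openai/gpt-4o-mini" is an illustrative model.
import os
from scrapegraphai.graphs import SmartScraperGraph

openai_key = os.environ["OPENAI_API_KEY"]  # raises KeyError if the variable isn't set

graph_config = {
    "llm": {
        "api_key": openai_key,          # must be the real sk-... key, not a placeholder string
        "model": "openai/gpt-4o-mini",
    },
}

smart_scraper_graph = SmartScraperGraph(
    prompt="List all the articles on the page",
    source="https://example.com",
    config=graph_config,
)
result = smart_scraper_graph.run()
print(result)
```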

It works now, after about 8 hours.
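For anyone who lands here with the same 401 after settling a bill: billing and key changes can apparently take a while to propagate, so a quick way to rule ScrapeGraphAI out is to hit the API directly with the same key. A minimal sketch, assuming the openai>=1.x client and the key in OPENAI_API_KEY:

```python
# Sanity-check the key itself, independent of scrapegraph-ai.
# Assumes openai>=1.0 is installed and OPENAI_API_KEY holds the key being tested.
import os
from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
models = client.models.list()  # a rejected key raises the same 401 AuthenticationError
print(f"Key accepted; {len(models.data)} models visible")
```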
