URGENT! PLEASE HELP... I am a beginner trying to connect ChatGPT to Python

Code:
from openai import OpenAI

client = OpenAI(
    api_key='xxxxxxxxxxxxxxxxx'  # redacted API key
)

def get_completion(prompt, client_instance, model='gpt-3.5-turbo'):
    # Wrap the prompt as a single user message and return the model's reply text
    messages = [{'role': 'user', 'content': prompt}]
    response = client_instance.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=200,
        temperature=0,
    )
    return response.choices[0].message.content

prompt = 'Can you write an essay on containment?'
get_completion(prompt, client)

Error:
RateLimitError Traceback (most recent call last)
Cell In[47], line 18
15 return response.choices[0].message.content
17 prompt = 'How far away is the moon?'
---> 18 get_completion(prompt, client)

Cell In[47], line 9, in get_completion(prompt, client_instance, model)
7 def get_completion(prompt, client_instance, model='gpt-3.5-turbo'):
8 messages = [{'role': 'user', 'content': prompt}]
----> 9 response = client_instance.chat.completions.create(
10 model=model,
11 messages=messages,
12 max_tokens=200,
13 temperature=0,
14 )
15 return response.choices[0].message.content

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_utils\_utils.py:301, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
299 msg = f"Missing required argument: {quote(missing[0])}"
300 raise TypeError(msg)
--> 301 return func(*args, **kwargs)

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\resources\chat\completions.py:598, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_p, user, extra_headers, extra_query, extra_body, timeout)
551 @required_args(["messages", "model"], ["messages", "model", "stream"])
552 def create(
553 self,
(…)
596 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
597 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 598 return self._post(
599 "/chat/completions",
600 body=maybe_transform(
601 {
602 "messages": messages,
603 "model": model,
604 "frequency_penalty": frequency_penalty,
605 "function_call": function_call,
606 "functions": functions,
607 "logit_bias": logit_bias,
608 "max_tokens": max_tokens,
609 "n": n,
610 "presence_penalty": presence_penalty,
611 "response_format": response_format,
612 "seed": seed,
613 "stop": stop,
614 "stream": stream,
615 "temperature": temperature,
616 "tool_choice": tool_choice,
617 "tools": tools,
618 "top_p": top_p,
619 "user": user,
620 },
621 completion_create_params.CompletionCreateParams,
622 ),
623 options=make_request_options(
624 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
625 ),
626 cast_to=ChatCompletion,
627 stream=stream or False,
628 stream_cls=Stream[ChatCompletionChunk],
629 )

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_base_client.py:1096, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1082 def post(
1083 self,
1084 path: str,
(…)
1091 stream_cls: type[_StreamT] | None = None,
1092 ) -> ResponseT | _StreamT:
1093 opts = FinalRequestOptions.construct(
1094 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1095 )
--> 1096 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_base_client.py:856, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
847 def request(
848 self,
849 cast_to: Type[ResponseT],
(…)
854 stream_cls: type[_StreamT] | None = None,
855 ) -> ResponseT | _StreamT:
--> 856 return self._request(
857 cast_to=cast_to,
858 options=options,
859 stream=stream,
860 stream_cls=stream_cls,
861 remaining_retries=remaining_retries,
862 )

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_base_client.py:894, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
892 if retries > 0 and self._should_retry(err.response):
893 err.response.close()
--> 894 return self._retry_request(
895 options,
896 cast_to,
897 retries,
898 err.response.headers,
899 stream=stream,
900 stream_cls=stream_cls,
901 )
903 # If the response is streamed then we need to explicitly read the response
904 # to completion before attempting to access the response text.
905 if not err.response.is_closed:

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_base_client.py:966, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
962 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
963 # different thread if necessary.
964 time.sleep(timeout)
--> 966 return self._request(
967 options=options,
968 cast_to=cast_to,
969 remaining_retries=remaining,
970 stream=stream,
971 stream_cls=stream_cls,
972 )

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_base_client.py:894, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
892 if retries > 0 and self._should_retry(err.response):
893 err.response.close()
--> 894 return self._retry_request(
895 options,
896 cast_to,
897 retries,
898 err.response.headers,
899 stream=stream,
900 stream_cls=stream_cls,
901 )
903 # If the response is streamed then we need to explicitly read the response
904 # to completion before attempting to access the response text.
905 if not err.response.is_closed:

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_base_client.py:966, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
962 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
963 # different thread if necessary.
964 time.sleep(timeout)
--> 966 return self._request(
967 options=options,
968 cast_to=cast_to,
969 remaining_retries=remaining,
970 stream=stream,
971 stream_cls=stream_cls,
972 )

File ~\PycharmProjects\apiQuery\venv\Lib\site-packages\openai\_base_client.py:908, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
905 if not err.response.is_closed:
906 err.response.read()
--> 908 raise self._make_status_error_from_response(err.response) from None
909 except httpx.TimeoutException as err:
910 if response is not None:

RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}


Welcome to the Dev community!

Have you added credits to your API account? The error code (`insufficient_quota`) suggests you don't have the credits needed to call the API.
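
Until that's sorted, you could also catch the error so your script prints a readable message instead of a long traceback. Here's a minimal sketch against the v1 openai library, with a placeholder key and the same function shape as your code:

import openai
from openai import OpenAI

client = OpenAI(api_key='xxxxxxxxxxxxxxxxx')  # placeholder; use your real key

def get_completion(prompt, client_instance, model='gpt-3.5-turbo'):
    messages = [{'role': 'user', 'content': prompt}]
    try:
        response = client_instance.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=200,
            temperature=0,
        )
    except openai.RateLimitError as err:
        # A 429 covers both rate limiting and insufficient_quota (no credit left)
        print(f'API call rejected (429): {err}')
        return None
    return response.choices[0].message.content

That won't fix the quota itself, but it makes the failure obvious at a glance.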


I have not! Is there a free trial or does anybody know how much this would cost?


There should be a small free credit grant when you create the account; if not, I believe you can start with as little as $5.
