Thank you so much.
The error is:
RateLimitError Traceback (most recent call last)
Cell In[13], line 74
71 speak(response)
73 if __name__ == "__main__":
—> 74 main()
Cell In[13], line 67, in main()
65 break
66 # Get ChatGPT response
—> 67 response = get_chatgpt_response(command)
68 print(“ChatGPT:”, response)
70 # Speak the response
Cell In[13], line 42, in get_chatgpt_response(text)
38 def get_chatgpt_response(text):
39
40 # Prepare the prompt with the user’s input text
—> 42 response = client.chat.completions.create(
43 model="gpt-3.5-turbo",
44 messages=[{"role": "system", "content": "I want you to act as a physics tutor. I will provide some physics equations or concepts, and it will be your job to solve it. "},
45 {"role": "user", "content": text}]
46 )
47 return response.choices[0].message.content
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_utils\_utils.py:275, in required_args.&lt;locals&gt;.inner.&lt;locals&gt;.wrapper(*args, **kwargs)
273 msg = f"Missing required argument: {quote(missing[0])}"
274 raise TypeError(msg)
→ 275 return func(*args, **kwargs)
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\resources\chat\completions.py:667, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
615 @required_args([“messages”, “model”], [“messages”, “model”, “stream”])
616 def create(
617 self,
(…)
665 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
666 ) → ChatCompletion | Stream[ChatCompletionChunk]:
→ 667 return self._post(
668 “/chat/completions”,
669 body=maybe_transform(
670 {
671 “messages”: messages,
672 “model”: model,
673 “frequency_penalty”: frequency_penalty,
674 “function_call”: function_call,
675 “functions”: functions,
676 “logit_bias”: logit_bias,
677 “logprobs”: logprobs,
678 “max_tokens”: max_tokens,
679 “n”: n,
680 “presence_penalty”: presence_penalty,
681 “response_format”: response_format,
682 “seed”: seed,
683 “stop”: stop,
684 “stream”: stream,
685 “temperature”: temperature,
686 “tool_choice”: tool_choice,
687 “tools”: tools,
688 “top_logprobs”: top_logprobs,
689 “top_p”: top_p,
690 “user”: user,
691 },
692 completion_create_params.CompletionCreateParams,
693 ),
694 options=make_request_options(
695 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
696 ),
697 cast_to=ChatCompletion,
698 stream=stream or False,
699 stream_cls=Stream[ChatCompletionChunk],
700 )
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_base_client.py:1213, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1199 def post(
1200 self,
1201 path: str,
(…)
1208 stream_cls: type[_StreamT] | None = None,
1209 ) → ResponseT | _StreamT:
1210 opts = FinalRequestOptions.construct(
1211 method=“post”, url=path, json_data=body, files=to_httpx_files(files), **options
1212 )
→ 1213 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_base_client.py:902, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
893 def request(
894 self,
895 cast_to: Type[ResponseT],
(…)
900 stream_cls: type[_StreamT] | None = None,
901 ) → ResponseT | _StreamT:
→ 902 return self._request(
903 cast_to=cast_to,
904 options=options,
905 stream=stream,
906 stream_cls=stream_cls,
907 remaining_retries=remaining_retries,
908 )
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_base_client.py:978, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
976 if retries > 0 and self._should_retry(err.response):
977 err.response.close()
→ 978 return self._retry_request(
979 options,
980 cast_to,
981 retries,
982 err.response.headers,
983 stream=stream,
984 stream_cls=stream_cls,
985 )
987 # If the response is streamed then we need to explicitly read the response
988 # to completion before attempting to access the response text.
989 if not err.response.is_closed:
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_base_client.py:1026, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
1022 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
1023 # different thread if necessary.
1024 time.sleep(timeout)
→ 1026 return self._request(
1027 options=options,
1028 cast_to=cast_to,
1029 remaining_retries=remaining,
1030 stream=stream,
1031 stream_cls=stream_cls,
1032 )
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_base_client.py:978, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
976 if retries > 0 and self._should_retry(err.response):
977 err.response.close()
→ 978 return self._retry_request(
979 options,
980 cast_to,
981 retries,
982 err.response.headers,
983 stream=stream,
984 stream_cls=stream_cls,
985 )
987 # If the response is streamed then we need to explicitly read the response
988 # to completion before attempting to access the response text.
989 if not err.response.is_closed:
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_base_client.py:1026, in SyncAPIClient._retry_request(self, options, cast_to, remaining_retries, response_headers, stream, stream_cls)
1022 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
1023 # different thread if necessary.
1024 time.sleep(timeout)
→ 1026 return self._request(
1027 options=options,
1028 cast_to=cast_to,
1029 remaining_retries=remaining,
1030 stream=stream,
1031 stream_cls=stream_cls,
1032 )
File ~\anaconda3\envs\ProjectCIN\lib\site-packages\openai\_base_client.py:993, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
990 err.response.read()
992 log.debug(“Re-raising status error”)
→ 993 raise self._make_status_error_from_response(err.response) from None
995 return self._process_response(
996 cast_to=cast_to,
997 options=options,
(…)
1000 stream_cls=stream_cls,
1001 )
RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}
Selection deleted
Also, the libraries are:
import speech_recognition as sr
from gtts import gTTS
import os
import playsound
from openai import OpenAI