Error 429 when calling the API in Python on a pay-as-you-go plan

I am sure that I am not exceeding the usage limit, but I still get error code 429. Here is my code:

import speech_recognition as sr
import requests
import json
import pyttsx3
import threading

def listen_microphone():
    r = sr.Recognizer()
    mic = sr.Microphone()

    with mic as source:
        r.adjust_for_ambient_noise(source)  # Optional: Adjust for ambient noise
        print("Listening...")
        audio = r.listen(source, phrase_time_limit=10, timeout=10)  # Adjust chunk size and timeouts as needed

    try:
        print("Recognizing...")
        text = r.recognize_google(audio)  # Use Google Speech Recognition
        print(f"You said: {text}")
        return text
    except sr.UnknownValueError:
        print("Sorry, I couldn't understand what you said.")
    except sr.RequestError as e:
        print(f"Could not request results from Google Speech Recognition service: {e}")

    return None

def send_to_gpt3(question):
    api_url = 'cant include url in topic'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■MoWd8yBR'
    }
    payload = {
        'prompt': 'If the prompt following the "prompt:" instruction asks you to perform an action, such as opening a specific Windows application or conducting a Google search, please respond with "c: " followed by the prompt provided after "prompt:". However, if the prompt is a regular question, please provide a full sentence response as you normally would. prompt: ' + question + '\nA:',
        'max_tokens': 500,
        'temperature': 0.7,
        'top_p': 1,
        'n': 1,
        'stream': False,
        'logprobs': None,
        'stop': '\n'
    }

    try:
        response = requests.post(api_url, headers=headers, data=json.dumps(payload))
        response.raise_for_status()
        #print(f"Response status code: {response.status_code}")
        #print(f"Response content: {response.content}")
        data = response.json()
        return data['choices'][0]['text']
    except requests.exceptions.RequestException as e:
        print(f"Error occurred while sending request to GPT-3 API: {e}")
        return None

def speak_text(text):
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()

def run_code_snippet(code):
    def execute_code():
        try:
            exec(code)
        except Exception as e:
            print("Error executing code:", e)

    thread = threading.Thread(target=execute_code)
    thread.start()

while True:
    command = listen_microphone()
    if command:
        response = send_to_gpt3(command)
        if response:
            print(f"GPT-3 response: {response}")
            speak_text(response)
            if str(response).startswith(" c:"):
                api_url = 'cant include url in topic'
                headers = {
                    'Content-Type': 'application/json',
                    'Authorization': 'Bearer ■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■MoWd8yBR'
                }
                payload = {
                    'prompt': 'I need the complete python code to complete the following action, no more just the clean code formatted correctly: ' + response + '\nA:',
                    'max_tokens': 1000,
                    'temperature': 0.7,
                    'top_p': 1,
                    'n': 1,
                    'stream': False,
                    'logprobs': None,
                    'stop': '\n\n'
                }

                try:
                    response = requests.post(api_url, headers=headers, data=json.dumps(payload))
                    response.raise_for_status()
                    #print(f"Response status code: {response.status_code}")
                    #print(f"Response content: {response.content}")
                    data = response.json()
                    code = data['choices'][0]['text']
                    print(code)
                except requests.exceptions.RequestException as e:
                    print(f"Error occurred while sending request to GPT-3 API: {e}")
                print(response)
    else:
        print("Listening again...")

You’re not alone. I’ve been having the same issue since last night (everything was fine before that). I’ve had to resort to a 60-second backoff between attempts once I receive this error, despite not making anywhere close to enough requests to actually trip the rate limit. Shorter backoffs resulted in persistent failures, so this is the only approach I’ve found that makes forward progress.
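
In Python terms (my actual code is Scala), the workaround is roughly the sketch below; retry_request is just a name I made up, and the 60 seconds is only what happened to work for me, not anything official:

import time
import requests

def retry_request(api_url, headers, payload, max_attempts=5, backoff_seconds=60):
    # Hypothetical helper: retry on 429 with a fixed sleep between attempts.
    for attempt in range(max_attempts):
        response = requests.post(api_url, headers=headers, json=payload)
        if response.status_code != 429:
            response.raise_for_status()
            return response.json()
        print(f"429 on attempt {attempt + 1}, sleeping {backoff_seconds}s before retrying")
        time.sleep(backoff_seconds)
    raise RuntimeError("Still receiving 429 after all retry attempts")

A fixed sleep is crude (exponential backoff would be nicer), but a flat 60s was the shortest interval that reliably got requests through for me.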

FWIW, I’m using the openai-scala-client Scala client rather than Python, but the 429 error code is the same as you’re reporting. Also worth noting: the error message indicates some sort of server error, not a rate-limit error (sorry, I don’t have the exact error text available right now). I suspect the OpenAI infrastructure is struggling to cope with demand, but I’m just guessing.