I don't see any reported outages, but I have been unable to get any responses since this morning. The sanity check in the screenshot above is old code; nothing has been changed. I even changed the API key. What is happening?
Complete code:
import openai
from openai import OpenAI
import json
import os
import time
import random
from dotenv import load_dotenv
load_dotenv()
import logging
logging.basicConfig(filename="error.log", level=logging.ERROR)
# Pinned model snapshot identifiers used throughout this script.
model3 = "gpt-3.5-turbo-1106"
model4 = "gpt-4-1106-preview"
# define a retry decorator
def retry_with_exponential_backoff(
    func,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = True,
    max_retries: int = 1,
    errors: tuple = (
        openai.RateLimitError,
        openai.APITimeoutError,
        openai.UnprocessableEntityError,
        openai.InternalServerError,
        openai.APIConnectionError,
    ),
):
    """Retry *func* with exponential backoff on transient OpenAI errors.

    Args:
        func: The callable to wrap.
        initial_delay: Seconds to wait before the first retry.
        exponential_base: Multiplier applied to the delay after each retry.
        jitter: When True, randomize each delay to spread out retries.
        max_retries: Number of retries allowed before giving up.
        errors: Exception types that trigger a retry; any other exception
            propagates to the caller immediately.

    Raises:
        Exception: When more than ``max_retries`` retryable errors occur;
            chained to the last underlying error.
    """
    import functools  # local import so the file's top-level imports are untouched

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        num_retries = 0
        delay = initial_delay
        # Loop until success, a non-retryable error, or max_retries is hit.
        # Non-retryable exceptions simply propagate; the old
        # ``except Exception: raise e`` re-raise was redundant.
        while True:
            try:
                return func(*args, **kwargs)
            except errors as e:
                num_retries += 1
                if num_retries > max_retries:
                    # Chain the underlying error so the real cause is visible.
                    raise Exception(
                        f"Maximum number of retries ({max_retries}) exceeded."
                    ) from e
                # Exponential backoff with optional random jitter.
                delay *= exponential_base * (1 + jitter * random.random())
                logging.error(f"retry initiated {num_retries}. Delay {delay}")
                time.sleep(delay)

    return wrapper
# Explicit request timeout: the v1 SDK default is 600 seconds, so a stalled
# connection can look like "no response at all" for ten minutes per attempt.
# Failing fast lets the retry decorator's APITimeoutError handling kick in.
client = OpenAI(
    api_key=os.getenv("OPENAI_API_KEY_GPT_4"),
    timeout=60.0,  # seconds per request
)
@retry_with_exponential_backoff
def completions_with_backoff(**kwargs):
    # Thin pass-through to the Chat Completions endpoint; the decorator
    # retries transient OpenAI errors (rate limits, timeouts, 5xx) with
    # exponential backoff before giving up.
    return client.chat.completions.create(**kwargs)
def llm_response(prompt, model="gpt-4-1106-preview", temperature=0):
    """Send *prompt* as a single user message and return the model's JSON reply.

    Args:
        prompt: The user message; must mention "json" because JSON mode
            (``response_format={"type": "json_object"}``) requires it.
        model: Model identifier to query.
        temperature: Sampling temperature.

    Returns:
        The model's reply (a JSON string), or ``"{}"`` when the reply was
        truncated (``finish_reason == "length"``) or any error occurred.
    """
    try:
        response = completions_with_backoff(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            response_format={"type": "json_object"},
            temperature=temperature,
            presence_penalty=2,
        )
        # A truncated reply is almost certainly invalid JSON, so fall back
        # to an empty object rather than returning a broken payload.
        if response.choices[0].finish_reason == "length":
            return json.dumps({})
        return response.choices[0].message.content
    except Exception as e:
        # logging.exception records the full traceback, not just the message —
        # essential for diagnosing "nothing comes back" situations.
        logging.exception(f"something went wrong: {e}")
        logging.error("----------------------")
        return json.dumps({})
def get_response(response, key):
    """Parse the LLM's JSON *response* string and return the value at *key*.

    Returns None (after logging the problem) when the payload is not valid
    JSON, the key is missing, or any other lookup error occurs.
    """
    # Step 1: decode the JSON payload.
    try:
        payload = json.loads(response)
    except json.decoder.JSONDecodeError:
        logging.error(f"Incorrect json: {response}")
        return None
    except Exception as e:
        logging.error(f"Error while json loading: {e}")
        return None
    # Step 2: look up the requested key.
    try:
        return payload[key]
    except KeyError:
        logging.error(f"Incorrect key {key} for json {response}")
        return None
    except Exception as e:
        # e.g. payload decoded to a list and key is a string
        logging.error(f"Error while json loading: {e}")
        return None
# Test run: classify one review's sentiment and extract the result.
# JSON mode requires the word "json" to appear in the prompt, which it does.
prompt = '''
Classify the following review
as having either a positive or
negative sentiment:
The banana pudding was really tasty!
Output in json :
{
"sentiment": ""
}
'''
# Raw JSON string from the model (or "{}" on failure/truncation).
response = llm_response(prompt, model=model4)
print(response)
# Extract the "sentiment" field; None if the reply was not valid JSON.
val = get_response(response, "sentiment")
print(val)
Python 3.10,
openai = "==1.3.5"