Hi all,
I created an assistant from the browser and it works really well. I wrote some Python code, following the documentation, to send a message to the assistant and get a response back. Everything works, but while the assistant in the browser responds with code (as it should), the same assistant called from the API omits all of that code. It feels like some type of rate-limit response? I'm not sure why, but the conversation with the API assistant is theory-only.
Python code:
import json
from openai import OpenAI
import time
import os

# NOTE(review): store the key somewhere more secure and remove it from the
# script — e.g. api_key=os.environ["OPENAI_API_KEY"].
# BUG FIX: the original used curly quotes (‘…’) around both string literals,
# which is a SyntaxError in Python; replaced with straight quotes.
client = OpenAI(
    api_key='sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',  # placeholder — never commit a real key
)

# ID of the assistant created in the browser UI.
assistant1_id = 'xxxxxxxxxxxxxxxxxxxxxxxxx'
def create_thread(user_message):
    """Create a new thread, post *user_message* to it, and start a run.

    Parameters
    ----------
    user_message : str
        The text to send as the user's first message on the thread.

    Returns
    -------
    str or None
        The new thread's id on success (previously always None — callers
        that ignored the return value are unaffected), or None if posting
        the message raised.
    """
    thread = client.beta.threads.create()
    try:
        # Add the user's message to the freshly created thread.
        message = client.beta.threads.messages.create(
            thread_id=thread.id,
            role="user",
            content=user_message,
        )
        print("### message inside send_message is:", message)
        create_run(thread.id)
        return thread.id
    except Exception as e:
        print(f"An error occurred while sending the message: {e}")
        return None
def create_run(thread_ID):
    """Start a run of the configured assistant on the given thread.

    Parameters
    ----------
    thread_ID : str
        Id of the thread to run the assistant on.

    Hands off to ``display_assistant_response`` on success; returns None.
    """
    try:
        run = client.beta.threads.runs.create(
            thread_id=thread_ID,
            assistant_id=assistant1_id,
        )
        print("### run from create_run is:", run)
        display_assistant_response(thread_ID, run.id)
    except Exception as e:
        # BUG FIX: the original message said "while sending the message",
        # copy-pasted from create_thread — misleading for a run failure.
        print(f"An error occurred while creating the run: {e}")
        return None
def display_assistant_response(thread_ID, run_id, max_polls=5):
    """Poll a run until it leaves queued/in_progress (or the poll budget is
    spent), then print every text message on the thread and forward the last
    extracted value to ``display_gpt_response``.

    Parameters
    ----------
    thread_ID : str
        Id of the thread the run belongs to.
    run_id : str
        Id of the run to poll.
    max_polls : int, optional
        Maximum number of 1-second polls (default 5, matching the original
        hard-coded cap).
    """
    try:
        run = client.beta.threads.runs.retrieve(
            thread_id=thread_ID,
            run_id=run_id,
        )
        count = 0
        # BUG FIX: the original condition was
        #     run.status == "queued" or run.status == "in_progress" and count < 5
        # which parses as  queued or (in_progress and count < 5), so the
        # count cap never applied while the run stayed "queued" — an
        # unbounded polling loop. The membership test applies the cap to
        # both states.
        while run.status in ("queued", "in_progress") and count < max_polls:
            time.sleep(1)
            run = client.beta.threads.runs.retrieve(
                thread_id=thread_ID,
                run_id=run_id,
            )
            count += 1

        # Retrieve messages from the thread after the run completes (or the
        # poll budget runs out — the run may still be unfinished then).
        messages = client.beta.threads.messages.list(thread_id=thread_ID)
        print("---------------------------------------------")

        # BUG FIX: `value` was referenced after the loop without ever being
        # initialised — a NameError whenever no text content was found.
        # (The original also built an unused `system_message` dict here;
        # dead local removed.)
        value = None
        for message in messages.data:
            for content in message.content:
                if content.type == 'text' and content.text and content.text.value:
                    value = content.text.value
                    print("### Extracted value:", value)

        if value:
            display_gpt_response(value)
        else:
            print("Warning: Null value encountered in message content.")
    except Exception as e:
        print(f"An error occurred while sending the message: {e}")
        return None
def display_gpt_response(value):
    """Send *value* to the chat-completions endpoint and print the response.

    Parameters
    ----------
    value : str
        Message content forwarded from the assistant thread.

    BUG FIX: the original used curly quotes (“…”) around the model name and
    the messages dict keys/values — a SyntaxError; replaced with straight
    quotes.
    """
    try:
        # NOTE(review): the forwarded text is sent with role "system", not
        # "user" — unusual for a follow-up prompt; confirm this is intended.
        response = client.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=[
                {"role": "system", "content": value}
            ],
        )
        print("### response 1 from gpt is:", response)
    except Exception as e:
        print(f"An error occurred while sending the message: {e}")
        return None
def main():
    """Entry point: post a demo prompt to a new assistant thread."""
    print("hello!")
    user_message = "create a small LAN network."
    create_thread(user_message)
    print("end")


# BUG FIX: the original guard read `if name == “main”:` — missing the
# dunder underscores and using curly quotes, so it was a SyntaxError and
# could never invoke main().
if __name__ == "__main__":
    main()