Hello,
I’ve successfully been able to pull down project reports using a Python script that I built by referencing the introductory API reference guide.
I’m trying to retrieve conversation reports using what I believe to be the correct endpoint (/compliance/workspaces/{workspace_id}/conversations
), but I’m consistently getting a 400 Bad Request
error. I’ve followed the same approach that worked for projects, including handling pagination and rate limiting, but I can’t seem to get this working. Just constant 400 Bad Request Errors. I think I have been following the guide correctly.
My code is here:
import csv
import datetime
import json
import time

import requests
# ------------------------------------------------------------------------------
# CONFIGURATION
# ------------------------------------------------------------------------------

# Secret API key for the Compliance API (in real use, load from an env var
# rather than hard-coding it in the script).
API_KEY = "sk-XXXXXXXXXXXXXXXXXXXXXXXXXX"
WORKSPACE_ID = "XXXXXXXXXXXXXXXXXXXXXXXXXX"
# NOTE(review): the base URL must include the scheme. The pasted value
# "api.chatgpt dot com/v1" (link obfuscated for the forum) has no "https://",
# so requests cannot resolve it -- this alone breaks every call.
BASE_URL = "https://api.chatgpt.com/v1"

# Maximum times to retry a request when encountering 429 responses
MAX_RETRIES = 5
# Base delay (in seconds) before retrying on 429; doubles on each retry
BASE_DELAY = 2
def list_conversations(workspace_id, limit=20):
    """Yield every conversation in a workspace, following pagination.

    Args:
        workspace_id: ID of the workspace whose conversations are listed.
        limit: Page size requested per API call.

    Yields:
        dict: One conversation object per item, exactly as the API returns it.

    Raises:
        requests.exceptions.RequestException: On a non-retryable HTTP error,
            or after MAX_RETRIES consecutive 429 responses.
    """
    url = f"{BASE_URL}/compliance/workspaces/{workspace_id}/conversations"
    headers = {"Authorization": f"Bearer {API_KEY}"}
    after = None
    while True:
        params = {"limit": limit}
        if after:
            params["after"] = after

        retry_count = 0
        while True:
            try:
                # timeout= prevents the script from hanging forever on a
                # stalled connection (requests has no default timeout).
                response = requests.get(url, headers=headers, params=params, timeout=30)
                if response.status_code == 429:
                    if retry_count < MAX_RETRIES:
                        retry_count += 1
                        # Exponential backoff: BASE_DELAY, 2x, 4x, ...
                        sleep_time = BASE_DELAY * (2 ** (retry_count - 1))
                        print(f"[WARN] Rate-limited. Retrying in {sleep_time} seconds...")
                        time.sleep(sleep_time)
                        continue
                    # Retries exhausted -- surface the 429 as an exception.
                    response.raise_for_status()
                else:
                    # Raises on any 4xx/5xx, including the 400 you are seeing.
                    # TIP: before raising, inspect response.text here -- the
                    # 400 body usually names the missing/invalid parameter.
                    response.raise_for_status()
                break
            except requests.exceptions.RequestException as e:
                print(f"[ERROR] Request failed: {e}")
                raise

        data = response.json()
        # Yield each conversation from the current page
        for convo in data.get("data", []):
            yield convo
        # Stop when the API reports there are no further pages
        if not data.get("has_more"):
            break
        after = data.get("last_id")
def main():
    """Fetch all conversations, print each one, and export to JSON and CSV."""
    # BUG FIX: the pasted code had `all_conversations =` with no value,
    # which is a syntax error; it must be initialized to an empty list.
    all_conversations = []
    print("Fetching all conversations from the workspace...")
    for convo in list_conversations(WORKSPACE_ID, limit=10):
        # Convert the numeric (epoch-seconds) timestamp to a human-readable
        # string in UK format.
        raw_ts = convo.get('created_at')
        if raw_ts:
            # Timezone-aware replacement for datetime.utcfromtimestamp(),
            # which is deprecated since Python 3.12; the formatted output
            # is identical because the format string carries no tz info.
            dt_obj = datetime.datetime.fromtimestamp(raw_ts, tz=datetime.timezone.utc)
            # dd-mm-yyyy HH:MM:SS
            uk_format_with_time = dt_obj.strftime('%d-%m-%Y %H:%M:%S')
        else:
            uk_format_with_time = "N/A"
        print(f"Conversation ID: {convo['id']}, Created At (UK): {uk_format_with_time}")
        # Store the UK-formatted time back into the conversation data so the
        # exports below include it.
        convo["created_at_uk"] = uk_format_with_time
        all_conversations.append(convo)

    # EXAMPLE 1: Save to JSON file
    with open("conversations_output.json", "w", encoding="utf-8") as f:
        json.dump(all_conversations, f, indent=2)
    print("All conversation data saved to 'conversations_output.json'")

    # EXAMPLE 2: Save to CSV. The csv module handles quoting/escaping, so
    # IDs or timestamps containing commas cannot corrupt the file the way a
    # hand-built f-string row could. newline="" is required by the csv docs.
    with open("conversations_output.csv", "w", encoding="utf-8", newline="") as f:
        writer = csv.writer(f)
        # Adjust the columns below to match what your conversation data contains
        writer.writerow(["id", "created_at_uk"])
        for convo in all_conversations:
            writer.writerow([convo["id"], convo.get("created_at_uk", "")])
    print("All conversation data saved to 'conversations_output.csv'")

    print(f"Total Conversations Fetched: {len(all_conversations)}")
# BUG FIX: the guard must use the dunder names __name__ and "__main__"
# (the forum's markdown rendering ate the double underscores).
if __name__ == "__main__":
    main()
Any help with this would be greatly appreciated. Also, apologies in advance, I’m very new to this.
Let me know if you need any further details.