I called the endpoint a few times and reloaded a few times. It seemed healthy on my few old vector stores, which is all I have to test against.
Here's some Python written so it shouldn't fail on you.
- You can put api_key = "sk-proj-your_api_key_string" directly in the code if you're running locally, to be sure the right key is being used.
- You can wrap the call in a loop and keep asking for your vector stores with a bit of time.sleep(5) between requests to catch intermittent issues (see the polling sketch after the script below), or increase the default count.
- The default is a maximum of 20 results, and the API maximum per call is 100. The playground pages through them 10 at a time.
import os
import json
import gzip
import zlib
import urllib.request
import urllib.parse


def decode_content(data, encoding):
    """
    Decompress response body according to Content-Encoding header.
    Supports: gzip, deflate. Other encoding values are returned as-is.
    """
    if not encoding:
        return data
    enc = encoding.lower().strip()
    if enc == "gzip":
        return gzip.decompress(data)
    if enc == "deflate":
        return zlib.decompress(data)
    return data


def list_vector_store_ids(
    limit=20,
    after=None,
    before=None,
    order="desc",
):
    """
    Fetch vector store entries from OpenAI and return a list of IDs.

    Parameters:
        limit  - max number of items to return (1-100, default 20)
        after  - cursor ID to fetch items after this ID
        before - cursor ID to fetch items before this ID
        order  - "asc" or "desc" sort on created_at (default "desc")

    Returns:
        A list of string IDs for each vector_store in the response.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise EnvironmentError(
            "OPENAI_API_KEY environment variable is not set"
        )

    # build query parameters
    params = {}
    if limit is not None:
        params["limit"] = str(limit)
    if after:
        params["after"] = after
    if before:
        params["before"] = before
    if order:
        params["order"] = order
    qs = urllib.parse.urlencode(params)

    # assemble the full URL
    url = "https://api.openai.com/v1/vector_stores" + "?" + qs

    # headers modeled on a browser request, plus the assistants v2 beta header
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "en-US,en;q=0.5",
        "Authorization": "Bearer " + api_key,
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Host": "api.openai.com",
        "OpenAI-Beta": "assistants=v2",
        "Pragma": "no-cache",
        "Priority": "u=0",
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:143.0) "
            "Gecko/20100101 Firefox/143.0"
        ),
    }

    req = urllib.request.Request(url, headers=headers, method="GET")
    with urllib.request.urlopen(req) as resp:
        raw = resp.read()
        encoding = resp.getheader("Content-Encoding")

    body = decode_content(raw, encoding)
    data = json.loads(body.decode("utf-8"))

    # pull just the ID strings out of the returned list object
    ids = []
    for item in data.get("data", []):
        vid = item.get("id")
        if vid:
            ids.append(vid)
    return ids


if __name__ == "__main__":
    # Example usage: fetch up to 10 most recent stores
    vector_ids = list_vector_store_ids(limit=10)
    for vid in vector_ids:
        print(vid)
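As mentioned in the list above, you can also poll the endpoint in a loop to see whether short or empty results are intermittent. A minimal sketch, assuming the list_vector_store_ids() function from the script above is in scope (the three attempts and the 5-second sleep are arbitrary choices, not recommended values):

import time

# poll a few times to check for intermittent empty or short results
for attempt in range(3):
    ids = list_vector_store_ids(limit=100)  # ask for the API maximum
    print(f"attempt {attempt + 1}: {len(ids)} vector store(s)")
    if attempt < 2:
        time.sleep(5)  # brief pause between polls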
'''
The vector store list call returns a list of items. If there were only one vector store, the response would look like this:
{
"object": "list",
"data": [
{
"id": "vs_abcdef1234567890",
"object": "vector_store",
"created_at": 1700000000,
"name": "example vector store",
"description": null,
"usage_bytes": 2048,
"file_counts": {
"in_progress": 0,
"completed": 1,
"failed": 0,
"cancelled": 0,
"total": 1
},
"status": "completed",
"expires_after": null,
"expires_at": null,
"last_active_at": 1700000000,
"metadata": {}
}
],
"first_id": "vs_abcdef1234567890",
"last_id": "vs_abcdef1234567890",
"has_more": false
}
'''
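Because the IDs themselves act as cursors, a caller can page through more than 100 stores by passing the last ID of each page back in as after. A minimal sketch, again assuming list_vector_store_ids() from the script above; it stops on a short page rather than reading has_more, since the helper only returns IDs:

# collect every vector store ID by walking pages with the "after" cursor
all_ids = []
cursor = None
while True:
    page = list_vector_store_ids(limit=100, after=cursor)
    all_ids.extend(page)
    if len(page) < 100:  # a short page means there is nothing left to fetch
        break
    cursor = page[-1]  # last ID on this page becomes the next cursor
print(f"{len(all_ids)} vector store(s) total")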
The API endpoint itself has rate limits, even for calls like this that don't consume AI model resources. They are pretty high, but reachable if you poll in a tight loop.
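If you do hit a limit, the server responds with HTTP 429, which urllib raises as an HTTPError. A minimal sketch of catching that and backing off, wrapping the same list_vector_store_ids() helper (the retry count and delays are arbitrary, not values documented by OpenAI):

import time
import urllib.error

def list_ids_with_backoff(max_retries=5, **kwargs):
    """Call list_vector_store_ids, retrying on HTTP 429 with growing delays."""
    delay = 5  # seconds; arbitrary starting point
    for attempt in range(max_retries):
        try:
            return list_vector_store_ids(**kwargs)
        except urllib.error.HTTPError as err:
            if err.code != 429 or attempt == max_retries - 1:
                raise  # not a rate limit, or out of retries
            time.sleep(delay)
            delay *= 2  # simple exponential backoff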