Commit: Update api_usage.py
Changed file: api_usage.py (+13 additions, -14 deletions)
|
@@ -33,7 +33,7 @@ def get_headers(key, org_id:str = None):
|
|
| 33 |
headers["OpenAI-Organization"] = org_id
|
| 34 |
return headers
|
| 35 |
|
| 36 |
-
def get_subscription(key, org_list):
|
| 37 |
has_gpt4 = False
|
| 38 |
has_gpt4_32k = False
|
| 39 |
default_org = ""
|
|
@@ -46,7 +46,7 @@ def get_subscription(key, org_list):
|
|
| 46 |
list_models_avai = set()
|
| 47 |
|
| 48 |
for org_in in org_list:
|
| 49 |
-
available_models = get_models(key, org_in['id'])
|
| 50 |
headers = get_headers(key, org_in['id'])
|
| 51 |
has_gpt4_32k = True if GPT_TYPES[2] in available_models else False
|
| 52 |
has_gpt4 = True if GPT_TYPES[1] in available_models else False
|
|
@@ -57,7 +57,7 @@ def get_subscription(key, org_list):
|
|
| 57 |
if has_gpt4_32k:
|
| 58 |
org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
|
| 59 |
list_models_avai.update(GPT_TYPES)
|
| 60 |
-
status_formated = format_status([GPT_TYPES[2], GPT_TYPES[1], GPT_TYPES[0]], headers)
|
| 61 |
rpm.append(status_formated[0])
|
| 62 |
tpm.append(status_formated[1])
|
| 63 |
quota.append(status_formated[2])
|
|
@@ -66,7 +66,7 @@ def get_subscription(key, org_list):
|
|
| 66 |
elif has_gpt4:
|
| 67 |
org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
|
| 68 |
list_models_avai.update([GPT_TYPES[1], GPT_TYPES[0]])
|
| 69 |
-
status_formated = format_status([GPT_TYPES[1], GPT_TYPES[0]], headers)
|
| 70 |
rpm.append(status_formated[0])
|
| 71 |
tpm.append(status_formated[1])
|
| 72 |
quota.append(status_formated[2])
|
|
@@ -75,7 +75,7 @@ def get_subscription(key, org_list):
|
|
| 75 |
else:
|
| 76 |
org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
|
| 77 |
list_models_avai.update([GPT_TYPES[0]])
|
| 78 |
-
status_formated = format_status([GPT_TYPES[0]], headers)
|
| 79 |
rpm.append(status_formated[0])
|
| 80 |
tpm.append(status_formated[1])
|
| 81 |
quota.append(status_formated[2])
|
|
@@ -139,12 +139,11 @@ def send_oai_completions(oai_stuff):
|
|
| 139 |
def helper_oai(oai_stuff):
|
| 140 |
return send_oai_completions(oai_stuff)
|
| 141 |
|
| 142 |
-
def format_status(list_models_avai, headers):
|
| 143 |
rpm = []
|
| 144 |
tpm = []
|
| 145 |
quota = ""
|
| 146 |
-
|
| 147 |
-
args = [(r, headers, model) for model in list_models_avai]
|
| 148 |
with concurrent.futures.ThreadPoolExecutor() as executer:
|
| 149 |
for result in executer.map(helper_oai, args):
|
| 150 |
rpm.append(result[0])
|
|
@@ -171,23 +170,23 @@ def check_key_tier(rpm, tpm, dict, headers):
|
|
| 171 |
if (dictCount == dictItemsCount):
|
| 172 |
return "yes | custom-tier"
|
| 173 |
|
| 174 |
-
def get_orgs(key):
|
| 175 |
headers=get_headers(key)
|
| 176 |
-
rq =
|
| 177 |
return rq.json()['data']
|
| 178 |
|
| 179 |
-
def get_models(key, org: str = None):
|
| 180 |
if org != None:
|
| 181 |
headers = get_headers(key, org)
|
| 182 |
else:
|
| 183 |
headers = get_headers(key)
|
| 184 |
-
rq =
|
| 185 |
avai_models = rq.json()
|
| 186 |
return [model["id"] for model in avai_models["data"]] #[model["id"] for model in avai_models["data"] if model["id"] in GPT_TYPES]
|
| 187 |
|
| 188 |
-
def check_key_availability(key):
|
| 189 |
try:
|
| 190 |
-
return get_orgs(key)
|
| 191 |
except Exception as e:
|
| 192 |
return False
|
| 193 |
|
|
|
|
| 33 |
headers["OpenAI-Organization"] = org_id
|
| 34 |
return headers
|
| 35 |
|
| 36 |
+
def get_subscription(key, session, org_list):
|
| 37 |
has_gpt4 = False
|
| 38 |
has_gpt4_32k = False
|
| 39 |
default_org = ""
|
|
|
|
| 46 |
list_models_avai = set()
|
| 47 |
|
| 48 |
for org_in in org_list:
|
| 49 |
+
available_models = get_models(session, key, org_in['id'])
|
| 50 |
headers = get_headers(key, org_in['id'])
|
| 51 |
has_gpt4_32k = True if GPT_TYPES[2] in available_models else False
|
| 52 |
has_gpt4 = True if GPT_TYPES[1] in available_models else False
|
|
|
|
| 57 |
if has_gpt4_32k:
|
| 58 |
org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
|
| 59 |
list_models_avai.update(GPT_TYPES)
|
| 60 |
+
status_formated = format_status([GPT_TYPES[2], GPT_TYPES[1], GPT_TYPES[0]], session, headers)
|
| 61 |
rpm.append(status_formated[0])
|
| 62 |
tpm.append(status_formated[1])
|
| 63 |
quota.append(status_formated[2])
|
|
|
|
| 66 |
elif has_gpt4:
|
| 67 |
org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
|
| 68 |
list_models_avai.update([GPT_TYPES[1], GPT_TYPES[0]])
|
| 69 |
+
status_formated = format_status([GPT_TYPES[1], GPT_TYPES[0]], session, headers)
|
| 70 |
rpm.append(status_formated[0])
|
| 71 |
tpm.append(status_formated[1])
|
| 72 |
quota.append(status_formated[2])
|
|
|
|
| 75 |
else:
|
| 76 |
org.append(f"{org_in['id']} ({org_in['name']}, {org_in['title']}, {org_in['role']})")
|
| 77 |
list_models_avai.update([GPT_TYPES[0]])
|
| 78 |
+
status_formated = format_status([GPT_TYPES[0]], session, headers)
|
| 79 |
rpm.append(status_formated[0])
|
| 80 |
tpm.append(status_formated[1])
|
| 81 |
quota.append(status_formated[2])
|
|
|
|
| 139 |
def helper_oai(oai_stuff):
    """Unpack-free adapter so executor.map can dispatch one request tuple.

    Simply forwards *oai_stuff* to send_oai_completions and returns its
    result unchanged.
    """
    return send_oai_completions(oai_stuff)
|
| 141 |
|
| 142 |
+
def format_status(list_models_avai, session, headers):
|
| 143 |
rpm = []
|
| 144 |
tpm = []
|
| 145 |
quota = ""
|
| 146 |
+
args = [(session, headers, model) for model in list_models_avai]
|
|
|
|
| 147 |
with concurrent.futures.ThreadPoolExecutor() as executer:
|
| 148 |
for result in executer.map(helper_oai, args):
|
| 149 |
rpm.append(result[0])
|
|
|
|
| 170 |
if (dictCount == dictItemsCount):
|
| 171 |
return "yes | custom-tier"
|
| 172 |
|
| 173 |
+
def get_orgs(session, key):
    """Fetch the organizations visible to the given API key.

    Issues a GET against the organizations endpoint through *session*
    and returns the decoded ``data`` list from the JSON body.  Any HTTP
    or JSON-decoding error propagates to the caller (which is how
    check_key_availability detects an unusable key).
    """
    request_headers = get_headers(key)
    response = session.get(
        f"{BASE_URL}/organizations",
        headers=request_headers,
        timeout=10,
    )
    return response.json()['data']
|
| 177 |
|
| 178 |
+
def get_models(session, key, org: str = None):
    """Return the ids of every model available to *key*.

    Parameters:
        session: requests.Session used to perform the HTTP request.
        key: API key used for authentication.
        org: optional organization id; when supplied, the request is
            scoped to that organization via the org-specific headers.

    Returns:
        A list of model-id strings taken from the endpoint's ``data``
        array.  Network and JSON-decoding errors propagate to the caller.
    """
    # PEP 8: None comparisons use identity (`is not`), not equality (`!=`).
    if org is not None:
        headers = get_headers(key, org)
    else:
        headers = get_headers(key)
    rq = session.get(f"{BASE_URL}/models", headers=headers, timeout=10)
    avai_models = rq.json()
    return [model["id"] for model in avai_models["data"]] #[model["id"] for model in avai_models["data"] if model["id"] in GPT_TYPES]
|
| 186 |
|
| 187 |
+
def check_key_availability(session, key):
    """Return the key's organization list if the key is usable, else False.

    Parameters:
        session: requests.Session used for the underlying HTTP request.
        key: API key to probe.

    Returns:
        The list produced by get_orgs on success, or False on any failure.
    """
    try:
        return get_orgs(session, key)
    # Deliberate broad catch: an invalid key, a network error, or an
    # unexpected response shape all mean "key not available".  Callers
    # test the False sentinel rather than handling exceptions.  (The
    # original bound the exception as `e` but never used it.)
    except Exception:
        return False
|
| 192 |
|