Commit
·
e131090
1
Parent(s):
a5d946f
Update api_usage.py
Browse files- api_usage.py +13 -17
api_usage.py
CHANGED
|
@@ -22,37 +22,25 @@ def get_subscription(key):
|
|
| 22 |
#results = r.json()
|
| 23 |
body_turbo = {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
|
| 24 |
body_gpt4 = {"model": "gpt-4", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
|
|
|
|
| 25 |
|
| 26 |
if check_key_availability():
|
| 27 |
rpm = ""
|
| 28 |
org = ""
|
| 29 |
quota = ""
|
| 30 |
-
r = requests.post(queryUrl, headers=headers, json=body_gpt4 if
|
| 31 |
result = r.json()
|
| 32 |
if "id" in result:
|
| 33 |
rpm = r.headers['x-ratelimit-limit-requests']
|
| 34 |
org = r.headers['openai-organization']
|
| 35 |
-
quota = "
|
| 36 |
else:
|
| 37 |
e = result["error"]["code"]
|
| 38 |
-
rpm = "";
|
| 39 |
-
org = "";
|
| 40 |
quota = f"Error: {e}"
|
| 41 |
-
|
| 42 |
-
#has_payment_method = results["has_payment_method"]
|
| 43 |
-
# hard_limit = results["hard_limit"]
|
| 44 |
-
#soft_limit_usd = results["soft_limit_usd"]
|
| 45 |
-
#hard_limit_usd = results["hard_limit_usd"]
|
| 46 |
-
#plan_title = results["plan"]["title"]
|
| 47 |
-
#plan_id = results["plan"]["id"]
|
| 48 |
-
#account_name = results["account_name"]
|
| 49 |
return {"organization": org,
|
| 50 |
"rpm": rpm,
|
| 51 |
-
"quota": quota}
|
| 52 |
-
#"has_payment_method": has_payment_method,
|
| 53 |
-
#"soft_limit_usd": soft_limit_usd,
|
| 54 |
-
#"hard_limit_usd": hard_limit_usd,
|
| 55 |
-
#"plan": plan_title + ", " + plan_id}
|
| 56 |
else:
|
| 57 |
return {"organization": "",
|
| 58 |
"rpm": "",
|
|
@@ -72,6 +60,14 @@ def get_subscription(key):
|
|
| 72 |
# else:
|
| 73 |
# return ""
|
| 74 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
def check_gpt4_availability():
|
| 76 |
if check_key_availability():
|
| 77 |
available_models = [model["root"] for model in openai.Model.list()["data"]]
|
|
|
|
| 22 |
#results = r.json()
|
| 23 |
body_turbo = {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
|
| 24 |
body_gpt4 = {"model": "gpt-4", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
|
| 25 |
+
gpt4_avai = check_gpt4_availability()
|
| 26 |
|
| 27 |
if check_key_availability():
|
| 28 |
rpm = ""
|
| 29 |
org = ""
|
| 30 |
quota = ""
|
| 31 |
+
r = requests.post(queryUrl, headers=headers, json=body_gpt4 if gpt4_avai else body_turbo)
|
| 32 |
result = r.json()
|
| 33 |
if "id" in result:
|
| 34 |
rpm = r.headers['x-ratelimit-limit-requests']
|
| 35 |
org = r.headers['openai-organization']
|
| 36 |
+
quota = check_key_type("gpt-4" if gpt4_avai else "gpt-3.5-turbo", rpm)
|
| 37 |
else:
|
| 38 |
e = result["error"]["code"]
|
|
|
|
|
|
|
| 39 |
quota = f"Error: {e}"
|
| 40 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
return {"organization": org,
|
| 42 |
"rpm": rpm,
|
| 43 |
+
"quota": quota}
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
else:
|
| 45 |
return {"organization": "",
|
| 46 |
"rpm": "",
|
|
|
|
| 60 |
# else:
|
| 61 |
# return ""
|
| 62 |
|
| 63 |
+
def check_key_type(model, rpm):
    """Classify an OpenAI API key as trial or paid from its rate limit.

    Args:
        model: Model name used as the key into the module-level
            ``rate_limit_per_model`` reference table.
        rpm: Requests-per-minute limit reported for the key. The caller
            passes the raw ``x-ratelimit-limit-requests`` response header,
            which is a string, so it is coerced to ``int`` here.

    Returns:
        A human-readable classification string
        ("yes | trial", "yes | pay", or "yes | pay, possibly big key.").
    """
    # BUG FIX: header values are str; `str < int` raises TypeError and
    # `str == int` is always False, so the paid branch could never match.
    # Normalize to int before comparing.
    # NOTE(review): assumes rate_limit_per_model values are ints — confirm
    # against the module-level table (not visible in this chunk).
    rpm = int(rpm)
    limit = rate_limit_per_model[model]
    if rpm < limit:
        return "yes | trial"
    if rpm == limit:
        return "yes | pay"
    return "yes | pay, possibly big key."
|
| 70 |
+
|
| 71 |
def check_gpt4_availability():
|
| 72 |
if check_key_availability():
|
| 73 |
available_models = [model["root"] for model in openai.Model.list()["data"]]
|