prxyasd's picture
Update app.py
ced52ae verified
raw
history blame
17.4 kB
import asyncio
import gradio as gr
import requests
import re
from api_usage import (
get_subscription,
check_key_availability,
get_orgs_me,
check_key_ant_availability,
check_ant_rate_limit,
check_key_gemini_availability,
check_key_azure_availability,
get_azure_status,
get_azure_deploy,
check_key_mistral_availability,
check_mistral_quota,
check_key_replicate_availability,
check_key_aws_availability,
check_key_or_availability,
check_key_or_limits,
check_gcp_anthropic,
check_groq_status,
check_nai_status,
check_elevenlabs_status,
check_xai_status,
check_stability_status,
check_deepseek_status,
)
# ─────────────────────────────────────────
# Key-specific helper functions (kept from the original implementation)
# ─────────────────────────────────────────
def get_key_oai_info(key):
    """Check an OpenAI API key and summarize what it can access.

    Args:
        key: OpenAI API key (``sk-...``).

    Returns:
        dict with availability, GPT-4 access flags, organization info,
        rate limits, quota and model lists (empty strings when unavailable).
    """
    session = requests.Session()
    status, org_data = check_key_availability(session, key)
    info_dict = {
        "key_type": "OpenAI",
        "key_availability": bool(status),
        "gpt4_availability": "",
        "gpt4_32k_availability": "",
        "default_org": "",
        "org_description": "",
        "organization": "",
        "models": "",
        "requests_per_minute": "",
        "tokens_per_minute": "",
        "quota": "",
        "all_models": "",
    }
    if not status:
        return info_dict
    # On 403 the key may still be valid but org listing failed; retry
    # via /orgs/me and use that org data if the call succeeds.
    if status == 403:
        status_me, orgs_me = get_orgs_me(session, key)
        if status_me == 200:
            org_data = orgs_me
    subscription_info = get_subscription(key, session, org_data)
    info_dict.update(
        {
            "gpt4_availability": subscription_info["has_gpt4"],
            "gpt4_32k_availability": subscription_info["has_gpt4_32k"],
            "default_org": subscription_info["default_org"],
            "org_description": subscription_info["org_description"],
            "organization": subscription_info["organization"],
            "models": subscription_info["models"],
            "requests_per_minute": subscription_info["rpm"],
            "tokens_per_minute": subscription_info["tpm"],
            "quota": subscription_info["quota"],
            "all_models": subscription_info["all_models"],
        }
    )
    return info_dict
async def get_key_ant_info(key, rate_limit, claude_model):
    """Check an Anthropic Claude key and collect its rate-limit details."""
    result = await check_key_ant_availability(key, claude_model)

    def with_remaining(limit, remaining):
        # Append the remaining budget unless the limit field is empty.
        return limit if limit == "" else f"{limit} ({remaining} left)"

    info_dict = {
        "key_type": "Anthropic Claude",
        "key_availability": result[0],
        "status": result[1],
        "filter_response": result[2],
        "requests_per_minute": with_remaining(result[3], result[4]),
        "tokens_per_minute": with_remaining(result[5], result[6]),
        "tokens_input_per_minute": with_remaining(result[8], result[9]),
        "tokens_output_per_minute": with_remaining(result[10], result[11]),
        "tier": result[7],
        "concurrent_rate_limit": "",
        "models": result[12],
    }
    if rate_limit:
        # Optional, slower probe of the concurrent rate limit.
        info_dict["concurrent_rate_limit"] = await check_ant_rate_limit(key, claude_model)
    return info_dict
def get_key_gemini_info(key):
    """Check a Google Gemini key; report status and reachable models."""
    result = check_key_gemini_availability(key)
    return {
        "key_type": "Google Gemini",
        "key_availability": result[0],
        "status": result[1],
        "models": result[2],
    }
def get_key_azure_info(endpoint, api_key):
    """Check an Azure OpenAI resource; probe deployments and model access."""
    result = check_key_azure_availability(endpoint, api_key)
    info_dict = {
        "key_type": "Microsoft Azure OpenAI",
        "key_availability": result[0],
    }
    for field in (
        "gpt35_availability",
        "gpt4_availability",
        "gpt4_32k_availability",
        "dall_e_3_availability",
        "moderation_status",
        "models",
        "deployments",
    ):
        info_dict[field] = ""
    if result[0]:
        deployments = get_azure_deploy(endpoint, api_key)
        status = get_azure_status(endpoint, api_key, deployments)
        # get_azure_status packs moderation status first, then model flags.
        info_dict["moderation_status"] = status[0]
        info_dict["gpt35_availability"] = status[1]
        info_dict["gpt4_availability"] = status[2]
        info_dict["gpt4_32k_availability"] = status[3]
        info_dict["dall_e_3_availability"] = status[4]
        info_dict["models"] = result[1]
        info_dict["deployments"] = deployments
    return info_dict
def get_key_mistral_info(key):
    """Check a Mistral AI key; report quota, limits and available models.

    Args:
        key: Mistral API key.

    Returns:
        dict with availability, quota flag, limits and model list
        (empty strings when the key is invalid).
    """
    key_avai = check_key_mistral_availability(key)
    info_dict = {
        "key_type": "Mistral AI",
        "key_availability": bool(key_avai),
        "has_quota": "",
        "limits": "",
        "models": "",
    }
    if key_avai:
        quota_info = check_mistral_quota(key)
        info_dict["has_quota"] = quota_info[0]
        if quota_info[1]:
            info_dict["limits"] = quota_info[1]
        # check_key_mistral_availability returns the model list when valid.
        info_dict["models"] = key_avai
    return info_dict
def get_key_replicate_info(key):
    """Check a Replicate token; report account info, quota and hardware."""
    result = check_key_replicate_availability(key)
    available = result[0]
    return {
        "key_type": "Replicate",
        "key_availability": available,
        "account_name": result[1]["username"] if available else "",
        "type": result[1]["type"] if available else "",
        "has_quota": result[2] if available else "",
        "hardware_available": result[3] if available else "",
    }
async def get_key_aws_info(key):
    """Check AWS credentials for Bedrock/Claude access and IAM posture."""
    result = await check_key_aws_availability(key)
    info_dict = {
        "key_type": "Amazon AWS Claude",
        "key_availability": result[0],
        "username": "",
        "root": "",
        "admin": "",
        "quarantine": "",
        "iam_full_access": "",
        "iam_user_change_password": "",
        "aws_bedrock_full_access": "",
        "enabled_region": "",
        "models_usage": "",
        # NOTE(review): when the key is unusable this keeps result[1],
        # matching the original behavior (it doubles as the error detail).
        "cost_and_usage": result[1],
    }
    if result[0]:
        fields = (
            "username",
            "root",
            "admin",
            "quarantine",
            "iam_full_access",
            "iam_user_change_password",
            "aws_bedrock_full_access",
            "enabled_region",
            "models_usage",
            "cost_and_usage",
        )
        for field, value in zip(fields, result[1:11]):
            info_dict[field] = value
    return info_dict
def get_key_openrouter_info(key):
    """Check an OpenRouter key; report usage, balance and per-model limits."""
    result = check_key_or_availability(key)
    info_dict = {
        "key_type": "OpenRouter",
        "key_availability": result[0],
        "is_free_tier": "",
        "usage": "",
        "balance": "",
        "limit": "",
        "limit_remaining": "",
        "rate_limit_per_minite": "",  # sic: key name kept for output compatibility
        "4_turbo_per_request_tokens_limit": "",
        "sonnet_per_request_tokens_limit": "",
        "opus_per_request_tokens_limit": "",
    }
    if not result[0]:
        # On failure the second element carries the error text.
        info_dict["usage"] = result[1]
        return info_dict
    models_info = check_key_or_limits(key)
    account = result[1]
    info_dict["is_free_tier"] = account["is_free_tier"]
    info_dict["limit"] = account["limit"]
    info_dict["limit_remaining"] = account["limit_remaining"]
    info_dict["usage"] = f"${account['usage']:.4f}"
    if models_info[0]:
        info_dict["balance"] = f"${models_info[0]:.4f}"
    else:
        # No explicit balance returned: estimate from the per-minute rate limit.
        info_dict["balance"] = f"${result[2]/60} (estimated)"
    info_dict["rate_limit_per_minite"] = result[2]
    info_dict["4_turbo_per_request_tokens_limit"] = models_info[1]["openai/gpt-4o"]
    info_dict["sonnet_per_request_tokens_limit"] = models_info[1]["anthropic/claude-3.5-sonnet:beta"]
    info_dict["opus_per_request_tokens_limit"] = models_info[1]["anthropic/claude-3-opus:beta"]
    return info_dict
async def get_key_gcp_info(key, type):
    """Check GCP credentials for Vertex AI Anthropic access.

    ``type`` selects the credential format (0 = refresh token,
    1 = service account, as dispatched by process_single_key).
    The parameter name shadows the builtin but is kept for compatibility.
    """
    result = await check_gcp_anthropic(key, type)
    info_dict = {
        "key_type": "Vertex AI (GCP)",
        "key_availability": result[0],
        "status": "",
        "enabled_region": "",
    }
    if result[0]:
        info_dict["enabled_region"] = result[2]
    else:
        info_dict["status"] = result[1]
    return info_dict
def get_key_groq_info(key):
    """Check a Groq API key; report availability and model list.

    Args:
        key: Groq API key (``gsk_...``).

    Returns:
        dict with key type, availability flag and the model list
        (empty string when the key is invalid).
    """
    key_avai = check_groq_status(key)
    return {
        "key_type": "Groq",
        "key_availability": bool(key_avai),
        "models": key_avai or "",
    }
def get_key_nai_info(key):
    """Check a NovelAI persistent token; report user/subscription info.

    Args:
        key: NovelAI persistent token (``pst-...``).

    Returns:
        dict with key type, availability flag and user info
        (empty string when the key is invalid).
    """
    key_avai = check_nai_status(key)
    return {
        "key_type": "NovelAI",
        "key_availability": bool(key_avai[0]),
        "user_info": key_avai[1] if key_avai[0] else "",
    }
def get_key_elevenlabs_info(key):
    """Check an ElevenLabs key; report user and voice information."""
    result = check_elevenlabs_status(key)
    return {
        "key_type": "ElevenLabs",
        "key_availability": result[0],
        "user_info": result[1],
        "voices_info": result[2],
    }
def get_key_xai_info(key):
    """Check an xAI Grok key; report status and available models."""
    result = check_xai_status(key)
    return {
        "key_type": "xAI Grok",
        "key_availability": result[0],
        "key_status": result[1] if result[0] else "",
        "models": result[2] if result[0] else "",
    }
def get_key_stability_info(key):
    """Check a Stability AI key; report account info, credits and models."""
    result = check_stability_status(key)
    info_dict = {
        "key_type": "Stability AI",
        "key_availability": result[0],
        "account_info": "",
        "credits": "",
        "models": "",
    }
    if result[0]:
        for field, value in zip(("account_info", "credits", "models"), result[1:4]):
            info_dict[field] = value
    return info_dict
def get_key_deepseek_info(key):
    """Check a DeepSeek key; report balance and available models."""
    result = check_deepseek_status(key)
    # result order is (availability, models, balance); output key order
    # deliberately lists balance before models, as in the original.
    return {
        "key_type": "deepseek",
        "key_availability": result[0],
        "balance": result[2] if result[0] else "",
        "models": result[1] if result[0] else "",
    }
def not_supported(key):
    """Fallback result for keys matching no known provider format."""
    info_dict = {"key_type": "Not supported", "status": ""}
    return info_dict
# ─────────────────────────────────────────
# Newly added: asynchronous handling of a single key
# ─────────────────────────────────────────
async def process_single_key(key: str, rate_limit: bool, claude_model: str) -> dict:
    """Analyze a single key and return its info dict.

    The provider is inferred from the key's format. The dispatch order
    matters: several prefixes overlap (``sk-`` is shared by OpenRouter,
    Anthropic, Stability, DeepSeek and OpenAI), so the more specific
    patterns are tested first.

    Args:
        key: Raw key text (surrounding whitespace is stripped).
        rate_limit: Whether to probe Claude's concurrent rate limit.
        claude_model: Claude model used for the Anthropic checks.

    Returns:
        dict containing the original key plus provider-specific fields.
    """
    # re.match accepts pattern strings directly and caches compilation,
    # so the redundant per-call re.compile wrappers were removed.
    _key = key.strip()
    # OpenRouter
    if re.match(r"sk-or-v1-[a-z0-9]{64}", _key):
        return {"key": _key, **get_key_openrouter_info(_key)}
    # Anthropic Claude (several historical key formats)
    if re.match(r"sk-ant-api03-[a-zA-Z0-9\-_]{93}AA", _key) or (
        _key.startswith("sk-ant-") and len(_key) == 93
    ) or (len(_key) == 89 and re.match(r"sk-[a-zA-Z0-9]{86}", _key)):
        return {"key": _key, **await get_key_ant_info(_key, rate_limit, claude_model)}
    # Stability ("T3BlbkFJ" marks OpenAI keys, so it is excluded here)
    if re.match(r"sk-[a-zA-Z0-9]{48}", _key) and len(_key) == 51 and "T3BlbkFJ" not in _key:
        return {"key": _key, **get_key_stability_info(_key)}
    # Deepseek
    if re.match(r"sk-[a-f0-9]{32}", _key):
        return {"key": _key, **get_key_deepseek_info(_key)}
    # OpenAI
    if _key.startswith("sk-"):
        return {"key": _key, **get_key_oai_info(_key)}
    # Google Gemini
    if _key.startswith("AIzaSy"):
        return {"key": _key, **get_key_gemini_info(_key)}
    # NovelAI
    if _key.startswith("pst-"):
        return {"key": _key, **get_key_nai_info(_key)}
    # Replicate
    if (_key.startswith("r8_") and len(_key) == 40) or (_key.islower() and len(_key) == 40):
        return {"key": _key, **get_key_replicate_info(_key)}
    # xAI
    if _key.startswith("xai-"):
        return {"key": _key, **get_key_xai_info(_key)}
    # Azure endpoint: "name:key"
    if len(_key.split(":")) == 2 and _key.split(":")[1].islower() and len(_key.split(":")[1]) == 32 and "openai.azure.com" not in _key.split(":")[1]:
        endpoint, api_key = _key.split(":")
        return {"key": _key, **get_key_azure_info(endpoint, api_key)}
    # Azure endpoint: "https://xxx.openai.azure.com;key"
    if "openai.azure.com" in _key.split(";")[0]:
        endpoint, api_key = _key.split(";")
        return {"key": _key, **get_key_azure_info(endpoint, api_key)}
    # AWS
    if _key.startswith("AKIA") and len(_key.split(":")[0]) == 20 and _key.split(":")[0].isupper():
        return {"key": _key, **await get_key_aws_info(_key)}
    # ElevenLabs
    if re.match(r"[a-f0-9]{32}", _key) or re.match(r"sk_[a-f0-9]{48}", _key):
        return {"key": _key, **get_key_elevenlabs_info(_key)}
    # Mistral
    if re.match(r"[a-zA-Z0-9]{32}", _key):
        return {"key": _key, **get_key_mistral_info(_key)}
    # Groq
    if re.match(r"gsk_[a-zA-Z0-9]{20}WGdyb3FY[a-zA-Z0-9]{24}", _key):
        return {"key": _key, **get_key_groq_info(_key)}
    # GCP - refresh token
    if re.match(r"[\w\-]+:[\w\-@\.]+:[\w-]+:.+", _key):
        return {"key": _key, **await get_key_gcp_info(_key, 0)}
    # GCP - service account
    if re.match(r"[\w\-]+:[\w\-@\.]+:.+\\n", _key):
        return {"key": _key, **await get_key_gcp_info(_key, 1)}
    # Not supported
    return {"key": _key, **not_supported(_key)}
# ─────────────────────────────────────────
# Asynchronous processing of multiple keys
# ─────────────────────────────────────────
async def sort_keys(text: str, rate_limit: bool, claude_model: str):
    """Analyze each non-empty line of the textbox as one API key."""
    stripped = (line.strip() for line in text.splitlines())
    results = await asyncio.gather(
        *(process_single_key(line, rate_limit, claude_model) for line in stripped if line)
    )
    return results  # gr.JSON renders a list of dicts directly
# ─────────────────────────────────────────
# UI util
# ─────────────────────────────────────────
def clear_inputs(text: str):
    """Reset the key textbox; the current contents are ignored."""
    return ""
# ─────────────────────────────────────────
# Gradio UI
# ─────────────────────────────────────────
# Build the Gradio interface: a multi-line textbox for one or more keys,
# a Claude model selector, an optional rate-limit checkbox, and a JSON
# panel showing the per-key results.
with gr.Blocks() as demo:
    gr.Markdown(
        """
# OpenAI/Anthropic/Gemini/Azure/Mistral/Replicate/AWS Claude/OpenRouter/Vertex AI(GCP Anthropic)/Groq/NovelAI/ElevenLabs/xAI/Stability/Deepseek API Key Status Checker
*(Based on shaocongma, CncAnon1, su, Drago, kingbased key checkers)*
AWS credential format: `AWS_ACCESS_KEY_ID:AWS_SECRET_ACCESS_KEY` (root might not be accurate)
Azure format: `RESOURCE_NAME:API_KEY` **or** `https://RESOURCE_NAME.openai.azure.com;API_KEY`
GCP format:
β€’ Service account β†’ `PROJECT_ID:CLIENT_EMAIL:PRIVATE_KEY(\\n 포함)`
β€’ Refresh token β†’ `PROJECT_ID:CLIENT_ID:CLIENT_SECRET:REFRESH_TOKEN`
"""
    )
    # Claude models offered for the Anthropic filter / rate-limit probes.
    claude_options = [
        "claude-3-haiku-20240307",
        "claude-3-sonnet-20240229",
        "claude-3-opus-20240229",
        "claude-3-5-sonnet-20240620",
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
    ]
    with gr.Row():
        with gr.Column():
            # Input column: keys, model choice, options, action buttons.
            key_box = gr.Textbox(
                lines=5,
                max_lines=20,
                label="API Key(s) β€” μ€„λ°”κΏˆμœΌλ‘œ μ—¬λŸ¬ 개 μž…λ ₯",
                placeholder="각 μ€„λ§ˆλ‹€ ν•˜λ‚˜μ˜ ν‚€λ₯Ό μž…λ ₯ν•˜μ„Έμš”",
            )
            claude_model = gr.Dropdown(
                claude_options,
                value="claude-3-haiku-20240307",
                label="Claude API model (filter/concurrent check용)",
            )
            rate_limit = gr.Checkbox(label="Check concurrent rate limit (Claude, experimental)")
            with gr.Row():
                clear_button = gr.Button("Clear")
                submit_button = gr.Button("Submit", variant="primary")
        with gr.Column():
            # Output column: structured results for every submitted key.
            info = gr.JSON(label="API Key Information", open=True)
    # Wire events: Clear empties the textbox; Submit runs the async checker.
    clear_button.click(fn=clear_inputs, inputs=[key_box], outputs=[key_box])
    submit_button.click(
        fn=sort_keys,
        inputs=[key_box, rate_limit, claude_model],
        outputs=[info],
        api_name="sort_keys",  # also exposes this handler via the Gradio API
    )
demo.launch()