import json
import time
import requests
import jwt
import asyncio
# ─────────────────────────────────────────
# 1. GCP service-account access token
# ─────────────────────────────────────────
def get_access_token(client_email, private_key):
    current_time = int(time.time())
    expiration_time = current_time + 600  # 10 minutes
    claims = {
        "iss": client_email,
        "scope": "https://www.googleapis.com/auth/cloud-platform",
        "aud": "https://oauth2.googleapis.com/token",
        "exp": expiration_time,
        "iat": current_time,
    }
    try:
        signed_jwt = jwt.encode(claims, private_key, algorithm="RS256")
    except Exception as e:
        return False, str(e)
    response = requests.post(
        "https://oauth2.googleapis.com/token",
        data={
            "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
            "assertion": signed_jwt,
        },
    )
    if response.status_code == 200:
        return True, response.json()["access_token"]
    return False, response.text
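# Example usage (sketch): the service-account JSON downloaded from the GCP
# console contains both fields used above. The filename below is hypothetical.
#
#   with open("service_account.json") as f:
#       sa = json.load(f)
#   ok, token = get_access_token(sa["client_email"], sa["private_key"])
#   if ok:
#       print("Bearer token acquired:", token[:20], "...")
#   else:
#       print("Token request failed:", token)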
# ─────────────────────────────────────────
# 2. GCP access token from a refresh token
# ─────────────────────────────────────────
def get_access_token_refresh(client_id, client_secret, refresh_token):
    token_url = "https://oauth2.googleapis.com/token"
    data = {
        "client_id": client_id,
        "client_secret": client_secret,
        "refresh_token": refresh_token,
        "grant_type": "refresh_token",
    }
    resp = requests.post(token_url, data=data)
    if resp.status_code == 200:
        return True, resp.json()["access_token"]
    return False, resp.text
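# Example usage (sketch): client_id / client_secret come from an OAuth client
# in the GCP console, and refresh_token from a prior consent flow; the values
# below are placeholders.
#
#   ok, token = get_access_token_refresh(
#       client_id="xxxx.apps.googleusercontent.com",
#       client_secret="...",
#       refresh_token="...",
#   )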
# ─────────────────────────────────────────
# 3. Gemini model list (at most 10 returned)
# ─────────────────────────────────────────
def get_gemini_models(key, max_return: int = 10):
    url = (
        "https://generativelanguage.googleapis.com/v1beta/models"
        f"?key={key}&pageSize=1000"
    )
    resp = requests.get(url)
    if resp.status_code != 200:
        return ""
    models = resp.json().get("models", [])
    names = [m["name"].split("/")[1] for m in models]
    if len(names) > max_return:
        return names[:max_return] + [f"...(+{len(names)-max_return})"]
    return names
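# Example usage (sketch): list up to five model names for a key (placeholder key).
#
#   print(get_gemini_models("AIzaSy...your-key", max_return=5))
#   # e.g. ['gemini-1.5-flash', 'gemini-1.5-pro', ...] or "" if the key is rejected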
# ─────────────────────────────────────────
# 4. Simple Gemini key health check
# ─────────────────────────────────────────
def check_key_gemini_availability(key):
    """
    Returns: (bool, str)
      • True,  'ok'      : key is usable
      • False, 'exceed'  : usage/quota exceeded
      • False, 'invalid' : invalid key / missing permission
      • False, 'error'   : any other error
    """
    # Method 1) try listing models
    model_list = get_gemini_models(key, 1)
    if model_list != "":
        return True, "ok"
    # Method 2) try a dummy request
    url = (
        "https://generativelanguage.googleapis.com/v1beta/models/"
        "gemini-1.5-flash:generateContent"
        f"?key={key}"
    )
    payload = {
        "contents": [{"role": "user", "parts": [{"text": ""}]}],
        "generationConfig": {"maxOutputTokens": 0},
    }
    try:
        resp = requests.post(url, headers={"Content-Type": "application/json"}, json=payload)
    except Exception:
        return False, "error"
    if resp.status_code == 200:
        return True, "ok"
    try:
        err = resp.json().get("error", {})
    except ValueError:
        return False, "error"
    code = err.get("code", 0)
    status = err.get("status", "")
    message = err.get("message", "")
    # A rejected API key also comes back as INVALID_ARGUMENT, so check the
    # message before treating INVALID_ARGUMENT (bad dummy payload) as "key ok".
    if "API key not valid" in message:
        return False, "invalid"
    if status == "INVALID_ARGUMENT":
        return True, "ok"
    if code == 429 or status == "RESOURCE_EXHAUSTED":
        return False, "exceed"
    if code in (401, 403) or status in ("PERMISSION_DENIED", "UNAUTHENTICATED"):
        return False, "invalid"
    return False, "error"
# ─────────────────────────────────────────
# 5. Actual Gemini request (when needed)
# ─────────────────────────────────────────
def send_gemini_request(key, payload, model: str = "gemini-1.5-flash"):
    url = (
        "https://generativelanguage.googleapis.com/v1beta/models/"
        f"{model}:generateContent?key={key}"
    )
    resp = requests.post(url, headers={"Content-Type": "application/json"}, json=payload)
    if resp.status_code == 200:
        return True, resp.json()
    return False, resp.text
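# Example payload (sketch): a minimal generateContent request body; only the
# "contents" field is required, "generationConfig" is optional. The key is a
# placeholder.
#
#   demo_payload = {
#       "contents": [{"role": "user", "parts": [{"text": "Say hello."}]}],
#       "generationConfig": {"maxOutputTokens": 64},
#   }
#   ok, data = send_gemini_request("AIzaSy...your-key", demo_payload)
#   if ok:
#       print(data["candidates"][0]["content"]["parts"][0]["text"])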
# ─────────────────────────────────────────
# 6. Vertex AI (Anthropic) async request
# ─────────────────────────────────────────
async def send_gcp_request(
    session,        # aiohttp.ClientSession (or compatible) provided by the caller
    project_id,
    access_token,
    payload,        # JSON-encoded string; sent as the raw request body
    region="us-east5",
    model="claude-3-5-sonnet@20240620",
):
    VERTEX_URL = (
        f"https://{region}-aiplatform.googleapis.com/v1/projects/"
        f"{project_id}/locations/{region}/publishers/anthropic/models/{model}:streamRawPredict"
    )
    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json; charset=utf-8",
    }
    async with session.post(url=VERTEX_URL, headers=headers, data=payload) as resp:
        if resp.status != 200:
            return json.loads(await resp.text())
        return await resp.json()
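# Example usage (sketch): drive the coroutine with aiohttp. The payload follows
# the Anthropic Messages format used for Vertex AI rawPredict; the project id
# and the anthropic_version value are assumptions to verify against your setup.
#
#   import aiohttp
#
#   async def demo_vertex_call(project_id, access_token):
#       payload = json.dumps({
#           "anthropic_version": "vertex-2023-10-16",
#           "messages": [{"role": "user", "content": "Say hello."}],
#           "max_tokens": 64,
#       })
#       async with aiohttp.ClientSession() as session:
#           return await send_gcp_request(session, project_id, access_token, payload)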
# ─────────────────────────────────────────
# 7. Example: quick check of multiple keys
# ─────────────────────────────────────────
async def quick_check(keys: list[str]):
    """
    Assumes every key passed in is a Google Gemini key and returns a list of
    (key, availability, status) entries.
    Extend this to call other cloud-provider check routines here if needed.
    """
    results = []
    for k in keys:
        ok, st = check_key_gemini_availability(k.strip())
        results.append({"key": k.strip(), "availability": ok, "status": st})
    return results
# ─────────────────────────────────────────
# 8. Standalone test run
# ─────────────────────────────────────────
if __name__ == "__main__":
    # Example key list (replace with keys you own)
    test_keys = [
        "AIzaSyExampleINVALIDKEY_for_demo1",
        "AIzaSyExampleINVALIDKEY_for_demo2",
    ]
    # Synchronous entry point for the async check
    final = asyncio.run(quick_check(test_keys))
    print(json.dumps(final, indent=2, ensure_ascii=False))