# NOTE(review): removed scraped Hugging Face page header ("Spaces / Running")
# that was not part of the original source file.
import json
import time

import jwt
import requests
# -----------------------------------------
# 1. GCP service-account access token
# -----------------------------------------
def get_access_token(client_email, private_key):
    """Exchange a GCP service-account key for an OAuth2 access token.

    Builds a short-lived (10 minute) JWT signed with the service account's
    RSA private key and trades it for an access token at Google's OAuth2
    token endpoint (JWT-bearer grant).

    Args:
        client_email: Service-account e-mail address (JWT issuer).
        private_key: PEM-encoded RSA private key used to sign the JWT.

    Returns:
        (True, access_token)  on success.
        (False, exception)    if the JWT could not be signed (bad key, etc.).
        (False, error_text)   if the token endpoint rejected the request.
    """
    now = int(time.time())
    claims = {
        "iss": client_email,
        "scope": "https://www.googleapis.com/auth/cloud-platform",
        "aud": "https://oauth2.googleapis.com/token",
        "exp": now + 600,  # valid for 10 minutes
        "iat": now,
    }
    try:
        signed_jwt = jwt.encode(claims, private_key, algorithm="RS256")
    except Exception as e:
        # Malformed/garbled private key, unsupported algorithm, etc.
        return False, e
    response = requests.post(
        "https://oauth2.googleapis.com/token",
        data={
            "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
            "assertion": signed_jwt,
        },
        timeout=30,  # original had no timeout: a stalled socket hung forever
    )
    if response.status_code == 200:
        return True, response.json()["access_token"]
    return False, response.text
# -----------------------------------------
# 2. GCP access token via refresh token
# -----------------------------------------
def get_access_token_refresh(client_id, client_secret, refresh_token):
    """Obtain a fresh GCP access token from an OAuth2 refresh token.

    Args:
        client_id: OAuth2 client ID.
        client_secret: OAuth2 client secret.
        refresh_token: Long-lived refresh token issued to that client.

    Returns:
        (True, access_token) on success, (False, error_text) otherwise.
    """
    token_url = "https://oauth2.googleapis.com/token"
    data = {
        "client_id": client_id,
        "client_secret": client_secret,
        "refresh_token": refresh_token,
        "grant_type": "refresh_token",
    }
    # Timeout added: original request could block indefinitely.
    response = requests.post(token_url, data=data, timeout=30)
    if response.status_code == 200:
        return True, response.json()["access_token"]
    return False, response.text
# -----------------------------------------
# 3. (Optional) list available models - return at most a few
# -----------------------------------------
def get_gemini_models(key, max_return: int = 1):
    """List Gemini model names available to *key*.

    Because the full model list is long, at most ``max_return`` names are
    returned; when more exist, a summary entry ``"...(+N)"`` is appended.

    Bug fix: the original computed the name list but always returned
    ``None`` (the truncation logic was commented out).

    Args:
        key: Gemini API key.
        max_return: Maximum number of model names to return.

    Returns:
        list[str] of model names (possibly with a "...(+N)" summary),
        or "" when the API call fails.
    """
    url = f"https://generativelanguage.googleapis.com/v1beta/models?key={key}&pageSize=1000"
    response = requests.get(url, timeout=30)
    if response.status_code != 200:
        return ""
    models = response.json().get("models", [])
    # "models/gemini-1.5-flash" -> "gemini-1.5-flash"
    names = [m["name"].split("/")[1] for m in models]
    if len(names) > max_return:
        return names[:max_return] + [f"...(+{len(names) - max_return})"]
    return names
# -----------------------------------------
# 4. Probe key status with a "dummy" request
# -----------------------------------------
def send_fake_gemini_request(key, model: str = "gemini-1.5-flash"):
    """Cheaply check key validity by sending an empty prompt.

    Args:
        key: Gemini API key to probe.
        model: Model name used for the probe request.

    Returns:
        dict: the API "error" object when the request was rejected.
        "":   request succeeded (no "error" field) -> key looks usable.
        None: network failure or non-JSON response.
    """
    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={key}"
    payload = {
        "contents": [{"role": "user", "parts": [{"text": ""}]}],
        "generationConfig": {"maxOutputTokens": 0},  # never pay for output tokens
    }
    try:
        resp = requests.post(
            url,
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=30,  # original had none: a stalled socket hung forever
        )
        return resp.json().get("error", "")
    except (requests.RequestException, ValueError):
        # Narrowed from bare `except Exception`; ValueError covers a
        # non-JSON body raised by .json(). Contract (return None) preserved.
        return None
def check_key_gemini_availability(key):
    """Classify a Gemini API key by probing it with a dummy request.

    Returns (bool, str):
        (True,  "ok")      key is valid and usable
        (False, "exceed")  quota / rate limit exhausted
        (False, "invalid") bad key or missing permission
        (False, "error")   network failure or unrecognized error
    """
    outcome = send_fake_gemini_request(key)
    # Transport-level failure (timeout, DNS, non-JSON body).
    if outcome is None:
        return False, "error"
    # No "error" field in the response at all -> key works.
    if outcome == "":
        return True, "ok"
    # Otherwise `outcome` is the API's error object.
    status = outcome.get("status", "")
    code = outcome.get("code", 0)
    # An empty prompt legitimately triggers INVALID_ARGUMENT;
    # that means the key itself was accepted.
    if status == "INVALID_ARGUMENT":
        return True, "ok"
    if status == "RESOURCE_EXHAUSTED" or code == 429:
        return False, "exceed"
    if status in ("PERMISSION_DENIED", "UNAUTHENTICATED") or code in (401, 403):
        return False, "invalid"
    return False, "error"
# -----------------------------------------
# 5. Actual Gemini request
# -----------------------------------------
def send_gemini_request(key, payload, model: str = "gemini-1.5-flash"):
    """Send a real generateContent request to the Gemini API.

    Args:
        key: Gemini API key.
        payload: JSON-serializable request body (contents, config, ...).
        model: Target model name.

    Returns:
        (True, parsed_json) on HTTP 200, (False, response_text) otherwise.
    """
    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={key}"
    # Timeout added: original request could block indefinitely.
    resp = requests.post(
        url, headers={"Content-Type": "application/json"}, json=payload, timeout=60
    )
    if resp.status_code == 200:
        return True, resp.json()
    return False, resp.text
# -----------------------------------------
# 6. Vertex AI (Anthropic) request - async
# -----------------------------------------
async def send_gcp_request(
    session, project_id, access_token, payload, region="us-east5", model="claude-3-5-sonnet@20240620"
):
    """POST *payload* to an Anthropic model hosted on Vertex AI (async).

    Args:
        session: An aiohttp-style client session (presumably aiohttp.ClientSession
            — TODO confirm with caller).
        project_id: GCP project hosting the Vertex AI endpoint.
        access_token: OAuth2 bearer token (see get_access_token*).
        payload: Pre-serialized request body passed through as-is.
        region: Vertex AI region.
        model: Publisher model identifier.

    Returns:
        Parsed JSON body, for both success and error responses.
    """
    endpoint = (
        f"https://{region}-aiplatform.googleapis.com/v1/projects/"
        f"{project_id}/locations/{region}/publishers/anthropic/models/{model}:streamRawPredict"
    )
    request_headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json; charset=utf-8",
    }
    async with session.post(url=endpoint, headers=request_headers, data=payload) as response:
        if response.status == 200:
            return await response.json()
        # Error bodies may arrive with a non-JSON content type; parse manually.
        return json.loads(await response.text())