Commit 684b78c
Parent: 9119b63
added openai mode

Files changed:
- gemini_proxy.py  +0 -1005
- requirements.txt  +3 -4
gemini_proxy.py
DELETED
@@ -1,1005 +0,0 @@
import os
import json
import requests
import re
import uvicorn
import base64
import platform
import time
from datetime import datetime
from fastapi import FastAPI, Request, Response, HTTPException, Depends
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.middleware.cors import CORSMiddleware
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs
import ijson
from dotenv import load_dotenv

from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import Flow
from google.auth.transport.requests import Request as GoogleAuthRequest
from google.auth.exceptions import RefreshError

# Load environment variables from .env file
load_dotenv()

# --- Configuration ---
CLIENT_ID = "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com"
CLIENT_SECRET = "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"
SCOPES = [
    "https://www.googleapis.com/auth/cloud-platform",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
]
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CREDENTIAL_FILE = os.path.join(SCRIPT_DIR, "oauth_creds.json")
CODE_ASSIST_ENDPOINT = "https://cloudcode-pa.googleapis.com"
GEMINI_PORT = int(os.getenv("GEMINI_PORT", "8888"))  # Default to 8888 if not set
GEMINI_AUTH_PASSWORD = os.getenv("GEMINI_AUTH_PASSWORD", "123456")  # Default password
CLI_VERSION = "0.1.5"  # Match current gemini-cli version

# --- Global State ---
credentials = None
user_project_id = None
onboarding_complete = False

app = FastAPI()
security = HTTPBasic()

# Add CORS middleware for preflight requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allow all methods
    allow_headers=["*"],  # Allow all headers
)

def get_user_agent():
    """Generate User-Agent string matching gemini-cli format."""
    version = CLI_VERSION
    system = platform.system()
    arch = platform.machine()
    return f"GeminiCLI/{version} ({system}; {arch})"

def authenticate_user(request: Request):
    """Authenticate the user with multiple methods."""
    # Check for API key in query parameters first (for Gemini client compatibility)
    api_key = request.query_params.get("key")
    if api_key and api_key == GEMINI_AUTH_PASSWORD:
        return "api_key_user"

    # Check for API key in x-goog-api-key header (Google SDK format)
    goog_api_key = request.headers.get("x-goog-api-key", "")
    if goog_api_key and goog_api_key == GEMINI_AUTH_PASSWORD:
        return "goog_api_key_user"

    # Check for API key in Authorization header (Bearer token format)
    auth_header = request.headers.get("authorization", "")
    if auth_header.startswith("Bearer "):
        bearer_token = auth_header[7:]
        if bearer_token == GEMINI_AUTH_PASSWORD:
            return "bearer_user"

    # Check for HTTP Basic Authentication
    if auth_header.startswith("Basic "):
        try:
            encoded_credentials = auth_header[6:]
            decoded_credentials = base64.b64decode(encoded_credentials).decode('utf-8')
            username, password = decoded_credentials.split(':', 1)
            if password == GEMINI_AUTH_PASSWORD:
                return username
        except Exception:
            pass

    # If none of the authentication methods work
    raise HTTPException(
        status_code=401,
        detail="Invalid authentication credentials. Use HTTP Basic Auth, Bearer token, 'key' query parameter, or 'x-goog-api-key' header.",
        headers={"WWW-Authenticate": "Basic"},
    )

# Helper class to adapt a generator of bytes into a file-like object
# that ijson can read from.
class _GeneratorStream:
    def __init__(self, generator):
        self.generator = generator
        self.buffer = b''

    def read(self, size=-1):
        # This read implementation is crucial for streaming.
        # It must not block to read the entire stream if size is -1.
        if size == -1:
            # If asked to read all, return what's in the buffer and get one more chunk.
            try:
                self.buffer += next(self.generator)
            except StopIteration:
                pass
            data = self.buffer
            self.buffer = b''
            return data

        # Otherwise, read from the generator until we have enough bytes.
        while len(self.buffer) < size:
            try:
                self.buffer += next(self.generator)
            except StopIteration:
                # Generator is exhausted.
                break

        data = self.buffer[:size]
        self.buffer = self.buffer[size:]
        return data

class _OAuthCallbackHandler(BaseHTTPRequestHandler):
    auth_code = None

    def do_GET(self):
        query_components = parse_qs(urlparse(self.path).query)
        code = query_components.get("code", [None])[0]
        if code:
            _OAuthCallbackHandler.auth_code = code
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(b"<h1>Authentication successful!</h1><p>You can close this window and restart the proxy.</p>")
        else:
            self.send_response(400)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(b"<h1>Authentication failed.</h1><p>Please try again.</p>")

def get_platform_string():
    """Generate platform string matching gemini-cli format."""
    system = platform.system().upper()
    arch = platform.machine().upper()

    # Map to gemini-cli platform format
    if system == "DARWIN":
        if arch in ["ARM64", "AARCH64"]:
            return "DARWIN_ARM64"
        else:
            return "DARWIN_AMD64"
    elif system == "LINUX":
        if arch in ["ARM64", "AARCH64"]:
            return "LINUX_ARM64"
        else:
            return "LINUX_AMD64"
    elif system == "WINDOWS":
        return "WINDOWS_AMD64"
    else:
        return "PLATFORM_UNSPECIFIED"

def get_client_metadata(project_id=None):
    return {
        "ideType": "IDE_UNSPECIFIED",
        "platform": get_platform_string(),
        "pluginType": "GEMINI",
        "duetProject": project_id,
    }

def onboard_user(creds, project_id):
    """Ensures the user is onboarded, matching gemini-cli setupUser behavior."""
    global onboarding_complete
    if onboarding_complete:
        return

    # Refresh credentials if expired before making API calls
    if creds.expired and creds.refresh_token:
        print("Credentials expired. Refreshing before onboarding...")
        try:
            creds.refresh(GoogleAuthRequest())
            save_credentials(creds)
            print("Credentials refreshed successfully.")
        except Exception as e:
            print(f"Could not refresh credentials: {e}")
            raise

    print("Checking user onboarding status...")
    headers = {
        "Authorization": f"Bearer {creds.token}",
        "Content-Type": "application/json",
        "User-Agent": get_user_agent(),
    }

    # 1. Call loadCodeAssist to check current status
    load_assist_payload = {
        "cloudaicompanionProject": project_id,
        "metadata": get_client_metadata(project_id),
    }

    try:
        resp = requests.post(
            f"{CODE_ASSIST_ENDPOINT}/v1internal:loadCodeAssist",
            data=json.dumps(load_assist_payload),
            headers=headers,
        )
        resp.raise_for_status()
        load_data = resp.json()

        # Determine the tier to use (current or default)
        tier = None
        if load_data.get("currentTier"):
            tier = load_data["currentTier"]
            print("User is already onboarded.")
        else:
            # Find default tier for onboarding
            for allowed_tier in load_data.get("allowedTiers", []):
                if allowed_tier.get("isDefault"):
                    tier = allowed_tier
                    break

            if not tier:
                # Fallback tier if no default found (matching gemini-cli logic)
                tier = {
                    "name": "",
                    "description": "",
                    "id": "legacy-tier",
                    "userDefinedCloudaicompanionProject": True,
                }

        # Check if project ID is required but missing
        if tier.get("userDefinedCloudaicompanionProject") and not project_id:
            raise ValueError("This account requires setting the GOOGLE_CLOUD_PROJECT env var.")

        # If already onboarded, skip the onboarding process
        if load_data.get("currentTier"):
            onboarding_complete = True
            return

        print(f"Onboarding user to tier: {tier.get('name', 'legacy-tier')}")
        onboard_req_payload = {
            "tierId": tier.get("id"),
            "cloudaicompanionProject": project_id,
            "metadata": get_client_metadata(project_id),
        }

        # 2. Poll onboardUser until complete (matching gemini-cli polling logic)
        while True:
            onboard_resp = requests.post(
                f"{CODE_ASSIST_ENDPOINT}/v1internal:onboardUser",
                data=json.dumps(onboard_req_payload),
                headers=headers,
            )
            onboard_resp.raise_for_status()
            lro_data = onboard_resp.json()

            if lro_data.get("done"):
                print("Onboarding successful.")
                onboarding_complete = True
                break

            print("Onboarding in progress, waiting 5 seconds...")
            time.sleep(5)

    except requests.exceptions.HTTPError as e:
        print(f"Error during onboarding: {e.response.text}")
        raise

def get_user_project_id(creds):
    """Gets the user's project ID matching gemini-cli setupUser logic."""
    global user_project_id
    if user_project_id:
        return user_project_id

    # First, check for GOOGLE_CLOUD_PROJECT environment variable (matching gemini-cli)
    env_project_id = os.getenv("GOOGLE_CLOUD_PROJECT")
    if env_project_id:
        user_project_id = env_project_id
        print(f"Using project ID from GOOGLE_CLOUD_PROJECT: {user_project_id}")
        save_credentials(creds, user_project_id)
        return user_project_id

    # Second, check for GEMINI_PROJECT_ID as fallback
    gemini_env_project_id = os.getenv("GEMINI_PROJECT_ID")
    if gemini_env_project_id:
        user_project_id = gemini_env_project_id
        print(f"Using project ID from GEMINI_PROJECT_ID: {user_project_id}")
        save_credentials(creds, user_project_id)
        return user_project_id

    # Third, try to load project ID from credential file
    if os.path.exists(CREDENTIAL_FILE):
        try:
            with open(CREDENTIAL_FILE, "r") as f:
                creds_data = json.load(f)
                cached_project_id = creds_data.get("project_id")
                if cached_project_id:
                    user_project_id = cached_project_id
                    print(f"Loaded project ID from cache: {user_project_id}")
                    return user_project_id
        except Exception as e:
            print(f"Could not load project ID from cache: {e}")

    # If not found in environment or cache, probe for it via loadCodeAssist
    print("Project ID not found in environment or cache. Probing for user project ID...")

    # Refresh credentials if expired before making API calls
    if creds.expired and creds.refresh_token:
        print("Credentials expired. Refreshing before project ID probe...")
        try:
            creds.refresh(GoogleAuthRequest())
            save_credentials(creds)
            print("Credentials refreshed successfully.")
        except Exception as e:
            print(f"Could not refresh credentials: {e}")
            raise

    headers = {
        "Authorization": f"Bearer {creds.token}",
        "Content-Type": "application/json",
        "User-Agent": get_user_agent(),
    }

    probe_payload = {
        "metadata": get_client_metadata(),
    }

    try:
        resp = requests.post(
            f"{CODE_ASSIST_ENDPOINT}/v1internal:loadCodeAssist",
            data=json.dumps(probe_payload),
            headers=headers,
        )
        resp.raise_for_status()
        data = resp.json()
        user_project_id = data.get("cloudaicompanionProject")
        if not user_project_id:
            raise ValueError("Could not find 'cloudaicompanionProject' in loadCodeAssist response.")
        print(f"Successfully fetched user project ID: {user_project_id}")

        save_credentials(creds, user_project_id)
        print("Project ID saved to credential file for future use.")

        return user_project_id
    except requests.exceptions.HTTPError as e:
        print(f"Error fetching project ID: {e.response.text}")
        raise

def save_credentials(creds, project_id=None):
    print(f"DEBUG: Saving credentials - Token: {creds.token[:20] if creds.token else 'None'}..., Expired: {creds.expired}, Expiry: {creds.expiry}")

    creds_data = {
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
        "token": creds.token,  # Use 'token' instead of 'access_token' for consistency with Google Auth Library
        "refresh_token": creds.refresh_token,
        "scopes": creds.scopes if creds.scopes else SCOPES,  # Use 'scopes' as list instead of 'scope' as string
        "token_uri": "https://oauth2.googleapis.com/token",
    }

    # Add expiry if available - ensure it's timezone-aware
    if creds.expiry:
        # Ensure the expiry is timezone-aware (UTC)
        if creds.expiry.tzinfo is None:
            from datetime import timezone
            expiry_utc = creds.expiry.replace(tzinfo=timezone.utc)
        else:
            expiry_utc = creds.expiry
        creds_data["expiry"] = expiry_utc.isoformat()
        print(f"DEBUG: Saving expiry as: {creds_data['expiry']}")
    else:
        print("DEBUG: No expiry time available to save")

    # If project_id is provided, save it; otherwise preserve existing project_id
    if project_id:
        creds_data["project_id"] = project_id
    elif os.path.exists(CREDENTIAL_FILE):
        try:
            with open(CREDENTIAL_FILE, "r") as f:
                existing_data = json.load(f)
                if "project_id" in existing_data:
                    creds_data["project_id"] = existing_data["project_id"]
        except Exception:
            pass  # If we can't read existing file, just continue without project_id

    print(f"DEBUG: Final credential data to save: {json.dumps(creds_data, indent=2)}")

    with open(CREDENTIAL_FILE, "w") as f:
        json.dump(creds_data, f, indent=2)

    print("DEBUG: Credentials saved to file")

def get_credentials():
    """Loads credentials matching gemini-cli OAuth2 flow."""
    global credentials

    # First, check if we already have valid credentials in memory
    if credentials and credentials.token:
        print("Using valid credentials from memory cache.")
        print(f"DEBUG: Memory credentials - Token: {credentials.token[:20] if credentials.token else 'None'}..., Expired: {credentials.expired}, Expiry: {credentials.expiry}")
        return credentials
    else:
        print("No valid credentials in memory. Loading from disk.")

    # Check environment for credentials first
    env_creds = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
    if env_creds and os.path.exists(env_creds):
        try:
            with open(env_creds, "r") as f:
                creds_data = json.load(f)
            credentials = Credentials.from_authorized_user_info(creds_data, SCOPES)
            print("Loaded credentials from GOOGLE_APPLICATION_CREDENTIALS.")
            print(f"DEBUG: Env credentials - Token: {credentials.token[:20] if credentials.token else 'None'}..., Expired: {credentials.expired}, Expiry: {credentials.expiry}")

            # Always refresh tokens at startup when loading from file to avoid issues
            if credentials.refresh_token:
                print("Refreshing environment credentials at startup for reliability...")
                try:
                    credentials.refresh(GoogleAuthRequest())
                    # Note: We don't save environment credentials back to the env file
                    print("Startup token refresh successful for environment credentials.")
                except Exception as refresh_error:
                    print(f"Startup token refresh failed for environment credentials: {refresh_error}. Credentials may be stale.")
                    # Continue with existing credentials - they might still work
            else:
                print("No refresh token available in environment credentials - using as-is.")

            return credentials
        except Exception as e:
            print(f"Could not load credentials from GOOGLE_APPLICATION_CREDENTIALS: {e}")

    # Fallback to cached credentials
    if os.path.exists(CREDENTIAL_FILE):
        try:
            with open(CREDENTIAL_FILE, "r") as f:
                creds_data = json.load(f)

            print(f"DEBUG: Raw credential data from file: {json.dumps(creds_data, indent=2)}")

            # Handle both old format (access_token) and new format (token)
            if "access_token" in creds_data and "token" not in creds_data:
                creds_data["token"] = creds_data["access_token"]
                print("DEBUG: Converted access_token to token field")

            # Handle both old format (scope as string) and new format (scopes as list)
            if "scope" in creds_data and "scopes" not in creds_data:
                creds_data["scopes"] = creds_data["scope"].split()
                print("DEBUG: Converted scope string to scopes list")

            credentials = Credentials.from_authorized_user_info(creds_data, SCOPES)
            print("Loaded credentials from cache.")
            print(f"DEBUG: Loaded credentials - Token: {credentials.token[:20] if credentials.token else 'None'}..., Expired: {credentials.expired}, Expiry: {credentials.expiry}")

            # Manual expiry check to avoid timezone issues
            if credentials.expiry:
                from datetime import datetime, timezone
                now = datetime.now(timezone.utc)

                # Handle timezone-naive expiry by assuming it's UTC
                if credentials.expiry.tzinfo is None:
                    expiry_utc = credentials.expiry.replace(tzinfo=timezone.utc)
                else:
                    expiry_utc = credentials.expiry

                time_until_expiry = expiry_utc - now
                print(f"DEBUG: Current time: {now}")
                print(f"DEBUG: Token expires at: {expiry_utc}")
                print(f"DEBUG: Time until expiry: {time_until_expiry}")

                # Override the expired property if the token is actually still valid
                is_actually_expired = time_until_expiry.total_seconds() <= 0
                print(f"DEBUG: Token is actually expired: {is_actually_expired}")
                print(f"DEBUG: Google Auth Library says expired: {credentials.expired}")

                if not is_actually_expired and credentials.token:
                    print("DEBUG: Token is valid, overriding expired status")
                    # Monkey patch the expired property to return False
                    credentials._expiry = expiry_utc
                    return credentials

            # Always refresh tokens at startup when loading from file to avoid issues
            if credentials.refresh_token:
                print("Refreshing tokens at startup for reliability...")
                try:
                    credentials.refresh(GoogleAuthRequest())
                    save_credentials(credentials)
                    print("Startup token refresh successful.")
                except Exception as refresh_error:
                    print(f"Startup token refresh failed: {refresh_error}. Credentials may be stale.")
                    # Continue with existing credentials - they might still work
            else:
                print("No refresh token available - using cached credentials as-is.")

            return credentials
        except Exception as e:
            print(f"Could not load cached credentials: {e}. Starting new login.")

    # If no valid credentials, start new login flow
    client_config = {
        "installed": {
            "client_id": CLIENT_ID,
            "client_secret": CLIENT_SECRET,
            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
            "token_uri": "https://oauth2.googleapis.com/token",
        }
    }

    # Create flow with include_granted_scopes to handle scope changes
    flow = Flow.from_client_config(
        client_config,
        scopes=SCOPES,
        redirect_uri="http://localhost:8080"
    )

    # Set include_granted_scopes to handle additional scopes gracefully
    flow.oauth2session.scope = SCOPES

    auth_url, _ = flow.authorization_url(
        access_type="offline",
        prompt="consent",
        include_granted_scopes='true'
    )
    print(f"\nPlease open this URL in your browser to log in:\n{auth_url}\n")

    server = HTTPServer(("", 8080), _OAuthCallbackHandler)
    server.handle_request()

    auth_code = _OAuthCallbackHandler.auth_code
    if not auth_code:
        print("Failed to retrieve authorization code.")
        return None

    # Monkey patch to handle scope validation warnings
    import oauthlib.oauth2.rfc6749.parameters
    original_validate = oauthlib.oauth2.rfc6749.parameters.validate_token_parameters

    def patched_validate(params):
        try:
            return original_validate(params)
        except Warning:
            # Ignore scope change warnings
            pass

    oauthlib.oauth2.rfc6749.parameters.validate_token_parameters = patched_validate

    try:
        flow.fetch_token(code=auth_code)
        credentials = flow.credentials
        save_credentials(credentials)
        print("Authentication successful! Credentials saved.")
        return credentials
    except Exception as e:
        print(f"Authentication failed: {e}")
        return None
    finally:
        # Restore original function
        oauthlib.oauth2.rfc6749.parameters.validate_token_parameters = original_validate
@app.get("/v1/models")
|
571 |
-
@app.get("/v1beta/models")
|
572 |
-
async def list_models(request: Request, username: str = Depends(authenticate_user)):
|
573 |
-
"""List available models - matching gemini-cli supported models exactly."""
|
574 |
-
print(f"[GET] {request.url.path} - User: {username}")
|
575 |
-
print(f"[MODELS] Serving models list (both /v1/models and /v1beta/models return the same data)")
|
576 |
-
|
577 |
-
# Return all models supported by gemini-cli based on tokenLimits.ts
|
578 |
-
models_response = {
|
579 |
-
"models": [
|
580 |
-
{
|
581 |
-
"name": "models/gemini-1.5-pro",
|
582 |
-
"version": "001",
|
583 |
-
"displayName": "Gemini 1.5 Pro",
|
584 |
-
"description": "Mid-size multimodal model that supports up to 2 million tokens",
|
585 |
-
"inputTokenLimit": 2097152,
|
586 |
-
"outputTokenLimit": 8192,
|
587 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
588 |
-
"temperature": 1.0,
|
589 |
-
"maxTemperature": 2.0,
|
590 |
-
"topP": 0.95,
|
591 |
-
"topK": 64
|
592 |
-
},
|
593 |
-
{
|
594 |
-
"name": "models/gemini-1.5-flash",
|
595 |
-
"version": "001",
|
596 |
-
"displayName": "Gemini 1.5 Flash",
|
597 |
-
"description": "Fast and versatile multimodal model for scaling across diverse tasks",
|
598 |
-
"inputTokenLimit": 1048576,
|
599 |
-
"outputTokenLimit": 8192,
|
600 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
601 |
-
"temperature": 1.0,
|
602 |
-
"maxTemperature": 2.0,
|
603 |
-
"topP": 0.95,
|
604 |
-
"topK": 64
|
605 |
-
},
|
606 |
-
{
|
607 |
-
"name": "models/gemini-2.5-pro-preview-05-06",
|
608 |
-
"version": "001",
|
609 |
-
"displayName": "Gemini 2.5 Pro Preview 05-06",
|
610 |
-
"description": "Preview version of Gemini 2.5 Pro from May 6th",
|
611 |
-
"inputTokenLimit": 1048576,
|
612 |
-
"outputTokenLimit": 8192,
|
613 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
614 |
-
"temperature": 1.0,
|
615 |
-
"maxTemperature": 2.0,
|
616 |
-
"topP": 0.95,
|
617 |
-
"topK": 64
|
618 |
-
},
|
619 |
-
{
|
620 |
-
"name": "models/gemini-2.5-pro-preview-06-05",
|
621 |
-
"version": "001",
|
622 |
-
"displayName": "Gemini 2.5 Pro Preview 06-05",
|
623 |
-
"description": "Preview version of Gemini 2.5 Pro from June 5th",
|
624 |
-
"inputTokenLimit": 1048576,
|
625 |
-
"outputTokenLimit": 8192,
|
626 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
627 |
-
"temperature": 1.0,
|
628 |
-
"maxTemperature": 2.0,
|
629 |
-
"topP": 0.95,
|
630 |
-
"topK": 64
|
631 |
-
},
|
632 |
-
{
|
633 |
-
"name": "models/gemini-2.5-pro",
|
634 |
-
"version": "001",
|
635 |
-
"displayName": "Gemini 2.5 Pro",
|
636 |
-
"description": "Advanced multimodal model with enhanced capabilities",
|
637 |
-
"inputTokenLimit": 1048576,
|
638 |
-
"outputTokenLimit": 8192,
|
639 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
640 |
-
"temperature": 1.0,
|
641 |
-
"maxTemperature": 2.0,
|
642 |
-
"topP": 0.95,
|
643 |
-
"topK": 64
|
644 |
-
},
|
645 |
-
{
|
646 |
-
"name": "models/gemini-2.5-flash-preview-05-20",
|
647 |
-
"version": "001",
|
648 |
-
"displayName": "Gemini 2.5 Flash Preview 05-20",
|
649 |
-
"description": "Preview version of Gemini 2.5 Flash from May 20th",
|
650 |
-
"inputTokenLimit": 1048576,
|
651 |
-
"outputTokenLimit": 8192,
|
652 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
653 |
-
"temperature": 1.0,
|
654 |
-
"maxTemperature": 2.0,
|
655 |
-
"topP": 0.95,
|
656 |
-
"topK": 64
|
657 |
-
},
|
658 |
-
{
|
659 |
-
"name": "models/gemini-2.5-flash",
|
660 |
-
"version": "001",
|
661 |
-
"displayName": "Gemini 2.5 Flash",
|
662 |
-
"description": "Fast and efficient multimodal model with latest improvements",
|
663 |
-
"inputTokenLimit": 1048576,
|
664 |
-
"outputTokenLimit": 8192,
|
665 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
666 |
-
"temperature": 1.0,
|
667 |
-
"maxTemperature": 2.0,
|
668 |
-
"topP": 0.95,
|
669 |
-
"topK": 64
|
670 |
-
},
|
671 |
-
{
|
672 |
-
"name": "models/gemini-2.0-flash",
|
673 |
-
"version": "001",
|
674 |
-
"displayName": "Gemini 2.0 Flash",
|
675 |
-
"description": "Latest generation fast multimodal model",
|
676 |
-
"inputTokenLimit": 1048576,
|
677 |
-
"outputTokenLimit": 8192,
|
678 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
679 |
-
"temperature": 1.0,
|
680 |
-
"maxTemperature": 2.0,
|
681 |
-
"topP": 0.95,
|
682 |
-
"topK": 64
|
683 |
-
},
|
684 |
-
{
|
685 |
-
"name": "models/gemini-2.0-flash-preview-image-generation",
|
686 |
-
"version": "001",
|
687 |
-
"displayName": "Gemini 2.0 Flash Preview Image Generation",
|
688 |
-
"description": "Preview version with image generation capabilities",
|
689 |
-
"inputTokenLimit": 32000,
|
690 |
-
"outputTokenLimit": 8192,
|
691 |
-
"supportedGenerationMethods": ["generateContent", "streamGenerateContent"],
|
692 |
-
"temperature": 1.0,
|
693 |
-
"maxTemperature": 2.0,
|
694 |
-
"topP": 0.95,
|
695 |
-
"topK": 64
|
696 |
-
},
|
697 |
-
{
|
698 |
-
"name": "models/gemini-embedding-001",
|
699 |
-
"version": "001",
|
700 |
-
"displayName": "Gemini Embedding 001",
|
701 |
-
"description": "Text embedding model for semantic similarity and search",
|
702 |
-
"inputTokenLimit": 2048,
|
703 |
-
"outputTokenLimit": 1,
|
704 |
-
"supportedGenerationMethods": ["embedContent"],
|
705 |
-
"temperature": 0.0,
|
706 |
-
"maxTemperature": 0.0,
|
707 |
-
"topP": 1.0,
|
708 |
-
"topK": 1
|
709 |
-
}
|
710 |
-
]
|
711 |
-
}
|
712 |
-
|
713 |
-
return Response(content=json.dumps(models_response), status_code=200, media_type="application/json; charset=utf-8")
|
714 |
-
|
715 |
-
@app.options("/{full_path:path}")
|
716 |
-
async def handle_preflight(request: Request, full_path: str):
|
717 |
-
"""Handle CORS preflight requests without authentication."""
|
718 |
-
return Response(
|
719 |
-
status_code=200,
|
720 |
-
headers={
|
721 |
-
"Access-Control-Allow-Origin": "*",
|
722 |
-
"Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, PATCH, OPTIONS",
|
723 |
-
"Access-Control-Allow-Headers": "*",
|
724 |
-
"Access-Control-Allow-Credentials": "true",
|
725 |
-
}
|
726 |
-
)
|
727 |
-
|
728 |
-
@app.api_route("/{full_path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
|
729 |
-
async def proxy_request(request: Request, full_path: str, username: str = Depends(authenticate_user)):
|
730 |
-
print(f"[{request.method}] /{full_path} - User: {username}")
|
731 |
-
|
732 |
-
creds = get_credentials()
|
733 |
-
if not creds:
|
734 |
-
print("❌ No credentials available")
|
735 |
-
return Response(content="Authentication failed. Please restart the proxy to log in.", status_code=500)
|
736 |
-
|
737 |
-
print(f"Using credentials - Token: {creds.token[:20] if creds.token else 'None'}..., Expired: {creds.expired}")
|
738 |
-
|
739 |
-
# Check if credentials need refreshing (only when expired)
|
740 |
-
if creds.expired and creds.refresh_token:
|
741 |
-
print("Credentials expired. Refreshing...")
|
742 |
-
try:
|
743 |
-
creds.refresh(GoogleAuthRequest())
|
744 |
-
save_credentials(creds)
|
745 |
-
print("Credentials refreshed successfully.")
|
746 |
-
except Exception as e:
|
747 |
-
print(f"Could not refresh token during request: {e}")
|
748 |
-
return Response(content="Token refresh failed. Please restart the proxy to re-authenticate.", status_code=500)
|
749 |
-
elif not creds.token:
|
750 |
-
print("No access token available.")
|
751 |
-
return Response(content="No access token. Please restart the proxy to re-authenticate.", status_code=500)
|
752 |
-
|
753 |
-
proj_id = get_user_project_id(creds)
|
754 |
-
if not proj_id:
|
755 |
-
return Response(content="Failed to get user project ID.", status_code=500)
|
756 |
-
|
757 |
-
onboard_user(creds, proj_id)
|
758 |
-
|
759 |
-
post_data = await request.body()
|
760 |
-
path = f"/{full_path}"
|
761 |
-
model_name_from_url = None
|
762 |
-
action = None
|
763 |
-
|
764 |
-
model_match = re.match(r"/(v\d+(?:beta)?)/models/([^:]+):(\w+)", path)
|
765 |
-
|
766 |
-
is_streaming = False
|
767 |
-
if model_match:
|
768 |
-
model_name_from_url = model_match.group(2)
|
769 |
-
action = model_match.group(3)
|
770 |
-
target_url = f"{CODE_ASSIST_ENDPOINT}/v1internal:{action}"
|
771 |
-
if "stream" in action.lower():
|
772 |
-
is_streaming = True
|
773 |
-
else:
|
774 |
-
target_url = f"{CODE_ASSIST_ENDPOINT}{path}"
|
775 |
-
|
776 |
-
# Remove authentication query parameters before forwarding to Google API
|
777 |
-
query_params = dict(request.query_params)
|
778 |
-
# Remove our authentication parameters
|
779 |
-
query_params.pop("key", None)
|
780 |
-
|
781 |
-
# For streaming requests, always ensure alt=sse is set
|
782 |
-
if is_streaming:
|
783 |
-
query_params["alt"] = "sse"
|
784 |
-
|
785 |
-
# Add remaining query parameters to target URL if any
|
786 |
-
if query_params:
|
787 |
-
from urllib.parse import urlencode
|
788 |
-
target_url += "?" + urlencode(query_params)
|
789 |
-
|
790 |
-
try:
|
791 |
-
incoming_json = json.loads(post_data)
|
792 |
-
final_model = model_name_from_url if model_match else incoming_json.get("model")
|
793 |
-
|
794 |
-
# Set default safety settings to BLOCK_NONE if not specified by user
|
795 |
-
safety_settings = incoming_json.get("safetySettings")
|
796 |
-
if not safety_settings:
|
797 |
-
safety_settings = [
|
798 |
-
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
|
799 |
-
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
|
800 |
-
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
|
801 |
-
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
|
802 |
-
{"category": "HARM_CATEGORY_CIVIC_INTEGRITY", "threshold": "BLOCK_NONE"}
|
803 |
-
]
|
804 |
-
|
805 |
-
structured_payload = {
|
806 |
-
"model": final_model,
|
807 |
-
"project": proj_id,
|
808 |
-
"request": {
|
809 |
-
"contents": incoming_json.get("contents"),
|
810 |
-
"systemInstruction": incoming_json.get("systemInstruction"),
|
811 |
-
"cachedContent": incoming_json.get("cachedContent"),
|
812 |
-
"tools": incoming_json.get("tools"),
|
813 |
-
"toolConfig": incoming_json.get("toolConfig"),
|
814 |
-
"safetySettings": safety_settings,
|
815 |
-
"generationConfig": incoming_json.get("generationConfig"),
|
816 |
-
},
|
817 |
-
}
|
818 |
-
structured_payload["request"] = {
|
819 |
-
k: v
|
820 |
-
for k, v in structured_payload["request"].items()
|
821 |
-
if v is not None
|
822 |
-
}
|
823 |
-
final_post_data = json.dumps(structured_payload)
|
824 |
-
except (json.JSONDecodeError, AttributeError):
|
825 |
-
final_post_data = post_data
|
826 |
-
|
827 |
-
request_headers = {
|
828 |
-
"Authorization": f"Bearer {creds.token}",
|
829 |
-
"Content-Type": "application/json",
|
830 |
-
"User-Agent": get_user_agent(),
|
831 |
-
}
|
832 |
-
|
833 |
-
if is_streaming:
|
834 |
-
async def stream_generator():
|
835 |
-
try:
|
836 |
-
print(f"[STREAM] Starting streaming request to: {target_url}")
|
837 |
-
print(f"[STREAM] Request payload size: {len(final_post_data)} bytes")
|
838 |
-
print(f"[STREAM] Authorization header: Bearer {creds.token[:50]}...")
|
839 |
-
print(f"[STREAM] Full headers being sent: {request_headers}")
|
840 |
-
|
841 |
-
# Make the initial streaming request
|
842 |
-
resp = requests.post(target_url, data=final_post_data, headers=request_headers, stream=True)
|
843 |
-
print(f"[STREAM] Response status: {resp.status_code}")
|
844 |
-
print(f"[STREAM] Response headers: {dict(resp.headers)}")
|
845 |
-
|
846 |
-
# If we get a 401, try refreshing the token once
|
847 |
-
if resp.status_code == 401:
|
848 |
-
print("[STREAM] Received 401 from Google API. Attempting token refresh...")
|
849 |
-
print(f"[STREAM] Response text: {resp.text}")
|
850 |
-
|
851 |
-
if creds.refresh_token:
|
852 |
-
try:
|
853 |
-
creds.refresh(GoogleAuthRequest())
|
854 |
-
save_credentials(creds)
|
855 |
-
print("[STREAM] Token refreshed successfully. Retrying request...")
|
856 |
-
|
857 |
-
# Update headers with new token
|
858 |
-
request_headers["Authorization"] = f"Bearer {creds.token}"
|
859 |
-
|
860 |
-
# Retry the request with refreshed token
|
861 |
-
resp = requests.post(target_url, data=final_post_data, headers=request_headers, stream=True)
|
862 |
-
print(f"[STREAM] Retry response status: {resp.status_code}")
|
863 |
-
|
864 |
-
if resp.status_code == 401:
|
865 |
-
print("[STREAM] Still getting 401 after token refresh.")
|
866 |
-
yield f'data: {{"error": {{"message": "Authentication failed even after token refresh. Please restart the proxy to re-authenticate."}}}}\n\n'.encode('utf-8')
|
867 |
-
return
|
868 |
-
except Exception as refresh_error:
|
869 |
-
print(f"[STREAM] Token refresh failed: {refresh_error}")
|
870 |
-
yield f'data: {{"error": {{"message": "Token refresh failed. Please restart the proxy to re-authenticate."}}}}\n\n'.encode('utf-8')
|
871 |
-
return
|
872 |
-
else:
|
873 |
-
print("[STREAM] No refresh token available.")
|
874 |
-
yield f'data: {{"error": {{"message": "Authentication failed. Please restart the proxy to re-authenticate."}}}}\n\n'.encode('utf-8')
|
875 |
-
return
|
876 |
-
|
877 |
-
with resp:
|
878 |
-
resp.raise_for_status()
|
879 |
-
|
880 |
-
# Process exactly like the real Gemini SDK
|
881 |
-
print("[STREAM] Processing with Gemini SDK-compatible logic")
|
882 |
-
|
883 |
-
# Use iter_lines() exactly like the real Gemini SDK (without decode_unicode)
|
884 |
-
# This should be non-blocking and yield lines as they arrive
|
885 |
-
for chunk in resp.iter_lines():
|
886 |
-
if chunk:
|
887 |
-
# Decode UTF-8 if it's bytes (matching SDK logic exactly)
|
888 |
-
if not isinstance(chunk, str):
|
889 |
-
chunk = chunk.decode('utf-8')
|
890 |
-
|
891 |
-
# Strip 'data: ' prefix if present (matching SDK logic)
|
892 |
-
if chunk.startswith('data: '):
|
893 |
-
chunk = chunk[len('data: '):]
|
894 |
-
|
895 |
-
try:
|
896 |
-
# Parse the JSON from Google's internal API
|
897 |
-
obj = json.loads(chunk)
|
898 |
-
|
899 |
-
# Convert Google's internal format to standard Gemini format
|
900 |
-
if "response" in obj:
|
901 |
-
response_chunk = obj["response"]
|
902 |
-
# Output in standard Gemini streaming format
|
903 |
-
response_json = json.dumps(response_chunk, separators=(',', ':'))
|
904 |
-
# Encode back to UTF-8 bytes to match exactly what real Gemini API sends
|
905 |
-
response_line = f"data: {response_json}\n\n"
|
906 |
-
yield response_line.encode('utf-8')
|
907 |
-
except json.JSONDecodeError:
|
908 |
-
# Skip invalid JSON
|
909 |
-
continue
|
910 |
-
|
911 |
-
except requests.exceptions.RequestException as e:
|
912 |
-
print(f"Error during streaming request: {e}")
|
913 |
-
# Format error as real Gemini API would
|
914 |
-
yield f'data: {{"error": {{"message": "Upstream request failed: {str(e)}"}}}}\n\n'.encode('utf-8')
|
915 |
-
except Exception as e:
|
916 |
-
print(f"An unexpected error occurred during streaming: {e}")
|
917 |
-
# Format error as real Gemini API would
|
918 |
-
yield f'data: {{"error": {{"message": "An unexpected error occurred: {str(e)}"}}}}\n\n'.encode('utf-8')
|
919 |
-
|
920 |
-
# Create the streaming response with headers matching real Gemini API
|
921 |
-
response_headers = {
|
922 |
-
"Content-Type": "text/event-stream",
|
923 |
-
"Content-Disposition": "attachment",
|
924 |
-
"Vary": "Origin, X-Origin, Referer",
|
925 |
-
"X-XSS-Protection": "0",
|
926 |
-
"X-Frame-Options": "SAMEORIGIN",
|
927 |
-
"X-Content-Type-Options": "nosniff",
|
928 |
-
"Server": "ESF"
|
929 |
-
}
|
930 |
-
|
931 |
-
response = StreamingResponse(
|
932 |
-
stream_generator(),
|
933 |
-
media_type="text/event-stream",
|
934 |
-
headers=response_headers
|
935 |
-
)
|
936 |
-
|
937 |
-
return response
|
938 |
-
else:
|
939 |
-
# Make the request
|
940 |
-
print(f"[NON-STREAM] Starting request to: {target_url}")
|
941 |
-
print(f"[NON-STREAM] Authorization header: Bearer {creds.token[:50]}...")
|
942 |
-
print(f"[NON-STREAM] Full headers being sent: {request_headers}")
|
943 |
-
|
944 |
-
resp = requests.post(target_url, data=final_post_data, headers=request_headers)
|
945 |
-
|
946 |
-
print(f"[NON-STREAM] Response status: {resp.status_code}")
|
947 |
-
print(f"[NON-STREAM] Response headers: {dict(resp.headers)}")
|
948 |
-
|
949 |
-
# If we get a 401, try refreshing the token once
|
950 |
-
if resp.status_code == 401:
|
951 |
-
print("Received 401 from Google API. Attempting token refresh...")
|
952 |
-
print(f"Response text: {resp.text}")
|
953 |
-
|
954 |
-
if creds.refresh_token:
|
955 |
-
try:
|
956 |
-
creds.refresh(GoogleAuthRequest())
|
957 |
-
save_credentials(creds)
|
958 |
-
print("Token refreshed successfully. Retrying request...")
|
959 |
-
|
960 |
-
# Update headers with new token
|
961 |
-
request_headers["Authorization"] = f"Bearer {creds.token}"
|
962 |
-
|
963 |
-
# Retry the request with refreshed token
|
964 |
-
resp = requests.post(target_url, data=final_post_data, headers=request_headers)
|
965 |
-
print(f"Retry response status: {resp.status_code}")
|
966 |
-
|
967 |
-
if resp.status_code == 401:
|
968 |
-
print("Still getting 401 after token refresh.")
|
969 |
-
return Response(content="Authentication failed even after token refresh. Please restart the proxy to re-authenticate.", status_code=500)
|
970 |
-
except Exception as refresh_error:
|
971 |
-
print(f"Token refresh failed: {refresh_error}")
|
972 |
-
return Response(content="Token refresh failed. Please restart the proxy to re-authenticate.", status_code=500)
|
973 |
-
else:
|
974 |
-
print("No refresh token available.")
|
975 |
-
return Response(content="Authentication failed. Please restart the proxy to re-authenticate.", status_code=500)
|
976 |
-
|
977 |
-
if resp.status_code == 200:
|
978 |
-
try:
|
979 |
-
google_api_response = resp.json()
|
980 |
-
# The actual response is nested under the "response" key
|
981 |
-
standard_gemini_response = google_api_response.get("response")
|
982 |
-
# Return the response object directly, not wrapped in a list
|
983 |
-
return Response(content=json.dumps(standard_gemini_response), status_code=200, media_type="application/json; charset=utf-8")
|
984 |
-
except (json.JSONDecodeError, AttributeError) as e:
|
985 |
-
print(f"Error converting to standard Gemini format: {e}")
|
986 |
-
# Fallback to sending the original content if conversion fails
|
987 |
-
return Response(content=resp.content, status_code=resp.status_code, media_type=resp.headers.get("Content-Type"))
|
988 |
-
else:
|
989 |
-
return Response(content=resp.content, status_code=resp.status_code, media_type=resp.headers.get("Content-Type"))
|
990 |
-
|
991 |
-
|
992 |
-
if __name__ == "__main__":
|
993 |
-
print("Initializing credentials...")
|
994 |
-
creds = get_credentials()
|
995 |
-
if creds:
|
996 |
-
proj_id = get_user_project_id(creds)
|
997 |
-
if proj_id:
|
998 |
-
onboard_user(creds, proj_id)
|
999 |
-
print(f"\nStarting Gemini proxy server on http://localhost:{GEMINI_PORT}")
|
1000 |
-
print("Send your Gemini API requests to this address.")
|
1001 |
-
print(f"Authentication required - Password: {GEMINI_AUTH_PASSWORD}")
|
1002 |
-
print("Use HTTP Basic Authentication with any username and the password above.")
|
1003 |
-
uvicorn.run(app, host="0.0.0.0", port=GEMINI_PORT)
|
1004 |
-
else:
|
1005 |
-
print("\nCould not obtain credentials. Please authenticate and restart the server.")
requirements.txt
CHANGED
@@ -1,7 +1,6 @@
 fastapi
 uvicorn
-google-auth
-google-auth-oauthlib
 requests
-
-
+python-dotenv
+google-auth-oauthlib
+ijson