bibibi12345 committed
Commit c3b0824 · Parent(s): 32a0c88

added better logging

Files changed (4)
  1. src/auth.py +4 -3
  2. src/google_api_client.py +3 -0
  3. src/main.py +16 -7
  4. src/openai_routes.py +156 -85
src/auth.py CHANGED
@@ -2,6 +2,7 @@ import os
 import json
 import base64
 import time
+import logging
 from datetime import datetime
 from fastapi import Request, HTTPException, Depends
 from fastapi.security import HTTPBasic
@@ -195,7 +196,7 @@ def get_credentials():
         prompt="consent",
         include_granted_scopes='true'
     )
-    print(f"\nPlease open this URL in your browser to log in:\n{auth_url}\n")
+    logging.info(f"Please open this URL in your browser to log in: {auth_url}")
 
     server = HTTPServer(("", 8080), _OAuthCallbackHandler)
     server.handle_request()
@@ -220,10 +221,10 @@ def get_credentials():
         credentials = flow.credentials
         credentials_from_env = False  # Mark as file-based credentials
         save_credentials(credentials)
-        print("Authentication successful! Credentials saved.")
+        logging.info("Authentication successful! Credentials saved.")
         return credentials
     except Exception as e:
-        print(f"Authentication failed: {e}")
+        logging.error(f"Authentication failed: {e}")
         return None
     finally:
         oauthlib.oauth2.rfc6749.parameters.validate_token_parameters = original_validate
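Note: the new calls in src/auth.py go to the root logger, so they only render once something configures logging for the process (which this commit does in src/main.py, below). A minimal standalone sketch of that dependency; `force=True` is needed here only because the first bare call implicitly configures the root logger, which the real code avoids by configuring at import time:

    import logging

    def authenticate():
        # Bare root-logger call, as in the patched src/auth.py.
        logging.info("Authentication successful! Credentials saved.")

    authenticate()  # silent: the root logger defaults to WARNING
    logging.basicConfig(level=logging.INFO, force=True)
    authenticate()  # INFO:root:Authentication successful! Credentials saved.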
src/google_api_client.py CHANGED
@@ -115,6 +115,9 @@ def _handle_streaming_response(resp) -> StreamingResponse:
                     response_line = f"data: {response_json}\n\n"
                     yield response_line
                     await asyncio.sleep(0)
+                else:
+                    obj_json = json.dumps(obj, separators=(',', ':'))
+                    yield f"data: {obj_json}\n\n"
             except json.JSONDecodeError:
                 continue
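The new `else` branch makes the SSE stream lossless: a parsed JSON object that does not match the expected shape is re-serialized with compact separators and forwarded as its own `data:` event instead of being dropped. A minimal sketch of the pattern, with a hypothetical `is_expected` predicate standing in for the surrounding code's condition:

    import json

    def forward_sse(parsed_objects, is_expected):
        """Yield SSE 'data:' events, forwarding unexpected objects verbatim."""
        for obj in parsed_objects:
            if is_expected(obj):
                yield f"data: {json.dumps(obj)}\n\n"
            else:
                # Compact separators avoid padding spaces on the wire.
                yield f"data: {json.dumps(obj, separators=(',', ':'))}\n\n"

    events = forward_sse([{"response": {"text": "hi"}}, {"error": "quota"}],
                         is_expected=lambda o: "response" in o)
    print("".join(events))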
 
src/main.py CHANGED
@@ -1,9 +1,16 @@
+import logging
 from fastapi import FastAPI, Request, Response
 from fastapi.middleware.cors import CORSMiddleware
 from .gemini_routes import router as gemini_router
 from .openai_routes import router as openai_router
 from .auth import get_credentials, get_user_project_id, onboard_user
 
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+
 app = FastAPI()
 
 # Add CORS middleware for preflight requests
@@ -18,22 +25,24 @@ app.add_middleware(
 @app.on_event("startup")
 async def startup_event():
     try:
+        logging.info("Starting Gemini proxy server...")
         creds = get_credentials()
         if creds:
             try:
                 proj_id = get_user_project_id(creds)
                 if proj_id:
                     onboard_user(creds, proj_id)
-                print("Gemini proxy server started")
-                print("Authentication required - Password: see .env file")
+                    logging.info(f"Successfully onboarded with project ID: {proj_id}")
+                logging.info("Gemini proxy server started successfully")
+                logging.info("Authentication required - Password: see .env file")
             except Exception as e:
-                print(f"Setup failed: {str(e)}")
-                print("Server started but may not function properly until setup issues are resolved.")
+                logging.error(f"Setup failed: {str(e)}")
+                logging.warning("Server started but may not function properly until setup issues are resolved.")
         else:
-            print("Could not obtain credentials. Please authenticate and restart the server.")
+            logging.error("Could not obtain credentials. Please authenticate and restart the server.")
     except Exception as e:
-        print(f"Startup error: {str(e)}")
-        print("Server may not function properly.")
+        logging.error(f"Startup error: {str(e)}")
+        logging.warning("Server may not function properly.")
 
 @app.options("/{full_path:path}")
 async def handle_preflight(request: Request, full_path: str):
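Because `basicConfig` runs at import time in src/main.py, every module that calls the root logger (src/auth.py, src/openai_routes.py) shares one handler and one format. Under the format string above, a record renders like this (timestamp illustrative):

    import logging

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    logging.info("Gemini proxy server started successfully")
    # 2024-01-01 12:00:00,000 - root - INFO - Gemini proxy server started successfully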
src/openai_routes.py CHANGED
@@ -6,6 +6,7 @@ and delegate to the Google API client.
 import json
 import uuid
 import asyncio
+import logging
 from fastapi import APIRouter, Request, Response, Depends
 from fastapi.responses import StreamingResponse
 
@@ -29,55 +30,92 @@ async def openai_chat_completions(
 ):
     """
     OpenAI-compatible chat completions endpoint.
-    Transforms OpenAI requests to Gemini format, sends to Google API,
+    Transforms OpenAI requests to Gemini format, sends to Google API,
     and transforms responses back to OpenAI format.
     """
 
-    # Transform OpenAI request to Gemini format
-    gemini_request_data = openai_request_to_gemini(request)
-
-    # Build the payload for Google API
-    gemini_payload = build_gemini_payload_from_openai(gemini_request_data)
+    try:
+        logging.info(f"OpenAI chat completion request: model={request.model}, stream={request.stream}")
+
+        # Transform OpenAI request to Gemini format
+        gemini_request_data = openai_request_to_gemini(request)
+
+        # Build the payload for Google API
+        gemini_payload = build_gemini_payload_from_openai(gemini_request_data)
+
+    except Exception as e:
+        logging.error(f"Error processing OpenAI request: {str(e)}")
+        return Response(
+            content=json.dumps({
+                "error": {
+                    "message": f"Request processing failed: {str(e)}",
+                    "type": "invalid_request_error"
+                }
+            }),
+            status_code=400,
+            media_type="application/json"
+        )
 
     if request.stream:
         # Handle streaming response
         async def openai_stream_generator():
-            response = send_gemini_request(gemini_payload, is_streaming=True)
-
-            if isinstance(response, StreamingResponse):
-                response_id = "chatcmpl-" + str(uuid.uuid4())
-
-                async for chunk in response.body_iterator:
-                    if isinstance(chunk, bytes):
-                        chunk = chunk.decode('utf-8')
-
-                    if chunk.startswith('data: '):
-                        try:
-                            # Parse the Gemini streaming chunk
-                            chunk_data = chunk[6:]  # Remove 'data: ' prefix
-                            gemini_chunk = json.loads(chunk_data)
-
-                            # Transform to OpenAI format
-                            openai_chunk = gemini_stream_chunk_to_openai(
-                                gemini_chunk,
-                                request.model,
-                                response_id
-                            )
-
-                            # Send as OpenAI streaming format
-                            yield f"data: {json.dumps(openai_chunk)}\n\n"
-                            await asyncio.sleep(0)
-
-                        except (json.JSONDecodeError, KeyError, UnicodeDecodeError) as e:
-                            continue
-
-                # Send the final [DONE] marker
-                yield "data: [DONE]\n\n"
-            else:
-                # Error case - forward the error response
+            try:
+                response = send_gemini_request(gemini_payload, is_streaming=True)
+
+                if isinstance(response, StreamingResponse):
+                    response_id = "chatcmpl-" + str(uuid.uuid4())
+                    logging.info(f"Starting streaming response: {response_id}")
+
+                    async for chunk in response.body_iterator:
+                        if isinstance(chunk, bytes):
+                            chunk = chunk.decode('utf-8')
+
+                        if chunk.startswith('data: '):
+                            try:
+                                # Parse the Gemini streaming chunk
+                                chunk_data = chunk[6:]  # Remove 'data: ' prefix
+                                gemini_chunk = json.loads(chunk_data)
+
+                                # Transform to OpenAI format
+                                openai_chunk = gemini_stream_chunk_to_openai(
+                                    gemini_chunk,
+                                    request.model,
+                                    response_id
+                                )
+
+                                # Send as OpenAI streaming format
+                                yield f"data: {json.dumps(openai_chunk)}\n\n"
+                                await asyncio.sleep(0)
+
+                            except (json.JSONDecodeError, KeyError, UnicodeDecodeError) as e:
+                                logging.warning(f"Failed to parse streaming chunk: {str(e)}")
+                                continue
+
+                    # Send the final [DONE] marker
+                    yield "data: [DONE]\n\n"
+                    logging.info(f"Completed streaming response: {response_id}")
+                else:
+                    # Error case - log and forward the error response
+                    error_msg = "Streaming request failed"
+                    if hasattr(response, 'status_code'):
+                        error_msg += f" (status: {response.status_code})"
+                    if hasattr(response, 'body'):
+                        error_msg += f" (body: {response.body})"
+
+                    logging.error(error_msg)
+                    error_data = {
+                        "error": {
+                            "message": error_msg,
+                            "type": "api_error"
+                        }
+                    }
+                    yield f"data: {json.dumps(error_data)}\n\n"
+                    yield "data: [DONE]\n\n"
+            except Exception as e:
+                logging.error(f"Streaming error: {str(e)}")
                 error_data = {
                     "error": {
-                        "message": "Streaming request failed",
+                        "message": f"Streaming failed: {str(e)}",
                         "type": "api_error"
                     }
                 }
@@ -91,24 +129,40 @@ async def openai_chat_completions(
 
     else:
         # Handle non-streaming response
-        response = send_gemini_request(gemini_payload, is_streaming=False)
-
-        if isinstance(response, Response) and response.status_code != 200:
-            # Forward error responses as-is
-            return response
-
         try:
-            # Parse Gemini response and transform to OpenAI format
-            gemini_response = json.loads(response.body)
-            openai_response = gemini_response_to_openai(gemini_response, request.model)
+            response = send_gemini_request(gemini_payload, is_streaming=False)
 
-            return openai_response
+            if isinstance(response, Response) and response.status_code != 200:
+                # Log and forward error responses
+                logging.error(f"Gemini API error: status={response.status_code}, body={response.body}")
+                return response
 
-        except (json.JSONDecodeError, AttributeError) as e:
+            try:
+                # Parse Gemini response and transform to OpenAI format
+                gemini_response = json.loads(response.body)
+                openai_response = gemini_response_to_openai(gemini_response, request.model)
+
+                logging.info(f"Successfully processed non-streaming response for model: {request.model}")
+                return openai_response
+
+            except (json.JSONDecodeError, AttributeError) as e:
+                logging.error(f"Failed to parse Gemini response: {str(e)}")
+                return Response(
+                    content=json.dumps({
+                        "error": {
+                            "message": f"Failed to process response: {str(e)}",
+                            "type": "api_error"
+                        }
+                    }),
+                    status_code=500,
+                    media_type="application/json"
+                )
+        except Exception as e:
+            logging.error(f"Non-streaming request failed: {str(e)}")
             return Response(
                 content=json.dumps({
                     "error": {
-                        "message": "Failed to process response",
+                        "message": f"Request failed: {str(e)}",
                         "type": "api_error"
                     }
                 }),
@@ -124,41 +178,58 @@ async def openai_list_models(username: str = Depends(authenticate_user)):
     Returns available models in OpenAI format.
     """
 
-    # Convert our Gemini models to OpenAI format
-    from .config import SUPPORTED_MODELS
-
-    openai_models = []
-    for model in SUPPORTED_MODELS:
-        # Remove "models/" prefix for OpenAI compatibility
-        model_id = model["name"].replace("models/", "")
-        openai_models.append({
-            "id": model_id,
-            "object": "model",
-            "created": 1677610602,  # Static timestamp
-            "owned_by": "google",
-            "permission": [
-                {
-                    "id": "modelperm-" + model_id.replace("/", "-"),
-                    "object": "model_permission",
-                    "created": 1677610602,
-                    "allow_create_engine": False,
-                    "allow_sampling": True,
-                    "allow_logprobs": False,
-                    "allow_search_indices": False,
-                    "allow_view": True,
-                    "allow_fine_tuning": False,
-                    "organization": "*",
-                    "group": None,
-                    "is_blocking": False
-                }
-            ],
-            "root": model_id,
-            "parent": None
-        })
-
-    return {
-        "object": "list",
-        "data": openai_models
-    }
+    try:
+        logging.info("OpenAI models list requested")
+
+        # Convert our Gemini models to OpenAI format
+        from .config import SUPPORTED_MODELS
+
+        openai_models = []
+        for model in SUPPORTED_MODELS:
+            # Remove "models/" prefix for OpenAI compatibility
+            model_id = model["name"].replace("models/", "")
+            openai_models.append({
+                "id": model_id,
+                "object": "model",
+                "created": 1677610602,  # Static timestamp
+                "owned_by": "google",
+                "permission": [
+                    {
+                        "id": "modelperm-" + model_id.replace("/", "-"),
+                        "object": "model_permission",
+                        "created": 1677610602,
+                        "allow_create_engine": False,
+                        "allow_sampling": True,
+                        "allow_logprobs": False,
+                        "allow_search_indices": False,
+                        "allow_view": True,
+                        "allow_fine_tuning": False,
+                        "organization": "*",
+                        "group": None,
+                        "is_blocking": False
+                    }
+                ],
+                "root": model_id,
+                "parent": None
+            })
+
+        logging.info(f"Returning {len(openai_models)} models")
+        return {
+            "object": "list",
+            "data": openai_models
+        }
+
+    except Exception as e:
+        logging.error(f"Failed to list models: {str(e)}")
+        return Response(
+            content=json.dumps({
+                "error": {
+                    "message": f"Failed to list models: {str(e)}",
+                    "type": "api_error"
+                }
+            }),
+            status_code=500,
+            media_type="application/json"
+        )
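The heart of the streaming path above is a line-oriented transform: strip the `data: ` prefix, parse the Gemini chunk, re-emit it as an OpenAI `chat.completion.chunk`, and always close with `[DONE]`. A self-contained sketch of that loop; the chunk shape built here is a simplified stand-in for what `gemini_stream_chunk_to_openai` actually returns:

    import json
    import uuid

    def transform_stream(gemini_sse_lines, model):
        """Re-emit Gemini SSE lines as OpenAI-style streaming chunks."""
        response_id = "chatcmpl-" + str(uuid.uuid4())
        for line in gemini_sse_lines:
            if not line.startswith("data: "):
                continue
            try:
                gemini_chunk = json.loads(line[6:])  # drop the 'data: ' prefix
            except json.JSONDecodeError:
                continue                             # skip malformed chunks, as the route does
            openai_chunk = {                         # simplified stand-in shape
                "id": response_id,
                "object": "chat.completion.chunk",
                "model": model,
                "choices": [{"index": 0, "delta": {"content": json.dumps(gemini_chunk)}}],
            }
            yield f"data: {json.dumps(openai_chunk)}\n\n"
        yield "data: [DONE]\n\n"                     # terminator OpenAI clients wait for

    for event in transform_stream(['data: {"text": "Hello"}'], "gemini-2.5-pro"):
        print(event, end="")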