Niansuh committed on
Commit
8c22121
·
verified ·
1 Parent(s): 734aed4

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +49 -78
api/utils.py CHANGED
@@ -3,91 +3,34 @@ import json
3
  import uuid
4
  import asyncio
5
  import random
6
- from typing import Any, Dict, Optional
7
- import httpx
8
  from fastapi import HTTPException, Request
9
- from dotenv import load_dotenv
10
- from api import validate
11
  from api.config import (
12
  MODEL_MAPPING,
13
  get_headers_api_chat,
14
- get_headers_chat,
15
  BASE_URL,
16
  AGENT_MODE,
17
  TRENDING_AGENT_MODE,
18
  MODEL_PREFIXES
19
  )
20
- from api.models import ChatRequest
21
  from api.logger import setup_logger
22
- from api.rpmlimits import check_rate_limit, get_client_ip # Import rate limit functions
23
 
24
- # Initialize environment variables and logger
25
- load_dotenv()
26
  logger = setup_logger(__name__)
27
 
28
- def create_chat_completion_data(
29
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
30
- ) -> Dict[str, Any]:
31
- return {
32
- "id": f"chatcmpl-{uuid.uuid4()}",
33
- "object": "chat.completion.chunk",
34
- "created": timestamp,
35
- "model": model,
36
- "choices": [
37
- {
38
- "index": 0,
39
- "delta": {"content": content, "role": "assistant"},
40
- "finish_reason": finish_reason,
41
- }
42
- ],
43
- "usage": None,
44
- }
45
-
46
- def message_to_dict(message, model_prefix: Optional[str] = None):
47
- content = message.content if isinstance(message.content, str) else message.content[0]["text"]
48
- if model_prefix:
49
- content = f"{model_prefix} {content}"
50
- if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
51
- return {
52
- "role": message.role,
53
- "content": content,
54
- "data": {
55
- "imageBase64": message.content[1]["image_url"]["url"],
56
- "fileText": "",
57
- "title": "snapshot",
58
- },
59
- }
60
- return {"role": message.role, "content": content}
61
-
62
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
63
- if model_prefix and content.startswith(model_prefix):
64
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
65
- return content[len(model_prefix):].strip()
66
- return content
67
-
68
- def get_referer_url() -> str:
69
- return BASE_URL
70
-
71
  async def process_streaming_response(request: ChatRequest, request_obj: Request):
72
- referer_url = get_referer_url()
73
- logger.info(f"Processing streaming response - Model: {request.model} - URL: {referer_url}")
74
-
75
- client_ip = get_client_ip(request_obj)
76
- check_rate_limit(client_ip)
77
 
78
  agent_mode = AGENT_MODE.get(request.model, {})
79
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
80
  model_prefix = MODEL_PREFIXES.get(request.model, "")
81
 
82
  headers_api_chat = get_headers_api_chat(referer_url)
83
- validated_token = validate.getHid()
84
  logger.info(f"Retrieved validated token for IP {client_ip}: {validated_token}")
85
 
86
- if request.model == 'o1-preview':
87
- delay_seconds = random.randint(1, 60)
88
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
89
- await asyncio.sleep(delay_seconds)
90
-
91
  json_data = {
92
  "agentMode": agent_mode,
93
  "clickedAnswer2": False,
@@ -127,9 +70,7 @@ async def process_streaming_response(request: ChatRequest, request_obj: Request)
127
  content = line
128
  if content.startswith("$@$v=undefined-rv1$@$"):
129
  content = content[21:]
130
- cleaned_content = strip_model_prefix(content, model_prefix)
131
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
132
-
133
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
134
  yield "data: [DONE]\n\n"
135
  except httpx.HTTPStatusError as e:
@@ -140,24 +81,16 @@ async def process_streaming_response(request: ChatRequest, request_obj: Request)
140
  raise HTTPException(status_code=500, detail=str(e))
141
 
142
  async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
143
- referer_url = get_referer_url()
144
- logger.info(f"Processing non-streaming response - Model: {request.model} - URL: {referer_url}")
145
-
146
- client_ip = get_client_ip(request_obj)
147
- check_rate_limit(client_ip)
148
 
149
  agent_mode = AGENT_MODE.get(request.model, {})
150
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
151
  model_prefix = MODEL_PREFIXES.get(request.model, "")
152
 
153
  headers_api_chat = get_headers_api_chat(referer_url)
154
- headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
155
- validated_token = validate.getHid()
156
-
157
- if request.model == 'o1-preview':
158
- delay_seconds = random.randint(20, 60)
159
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
160
- await asyncio.sleep(delay_seconds)
161
 
162
  json_data = {
163
  "agentMode": agent_mode,
@@ -216,3 +149,41 @@ async def process_non_streaming_response(request: ChatRequest, request_obj: Requ
216
  ],
217
  "usage": None,
218
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import uuid
4
  import asyncio
5
  import random
 
 
6
  from fastapi import HTTPException, Request
7
+ import httpx
 
8
  from api.config import (
9
  MODEL_MAPPING,
10
  get_headers_api_chat,
 
11
  BASE_URL,
12
  AGENT_MODE,
13
  TRENDING_AGENT_MODE,
14
  MODEL_PREFIXES
15
  )
 
16
  from api.logger import setup_logger
17
+ from api import validate
18
 
 
 
19
  logger = setup_logger(__name__)
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  async def process_streaming_response(request: ChatRequest, request_obj: Request):
22
+ referer_url = BASE_URL
23
+ client_ip = request_obj.client.host # Get the client IP
24
+ logger.info(f"Processing streaming response - Model: {request.model} - URL: {referer_url} - IP: {client_ip}")
 
 
25
 
26
  agent_mode = AGENT_MODE.get(request.model, {})
27
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
28
  model_prefix = MODEL_PREFIXES.get(request.model, "")
29
 
30
  headers_api_chat = get_headers_api_chat(referer_url)
31
+ validated_token = validate.getHid() # Get validated token
32
  logger.info(f"Retrieved validated token for IP {client_ip}: {validated_token}")
33
 
 
 
 
 
 
34
  json_data = {
35
  "agentMode": agent_mode,
36
  "clickedAnswer2": False,
 
70
  content = line
71
  if content.startswith("$@$v=undefined-rv1$@$"):
72
  content = content[21:]
73
+ yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
 
 
74
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
75
  yield "data: [DONE]\n\n"
76
  except httpx.HTTPStatusError as e:
 
81
  raise HTTPException(status_code=500, detail=str(e))
82
 
83
  async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
84
+ referer_url = BASE_URL
85
+ client_ip = request_obj.client.host
86
+ logger.info(f"Processing non-streaming response - Model: {request.model} - URL: {referer_url} - IP: {client_ip}")
 
 
87
 
88
  agent_mode = AGENT_MODE.get(request.model, {})
89
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
90
  model_prefix = MODEL_PREFIXES.get(request.model, "")
91
 
92
  headers_api_chat = get_headers_api_chat(referer_url)
93
+ validated_token = validate.getHid() # Get validated token
 
 
 
 
 
 
94
 
95
  json_data = {
96
  "agentMode": agent_mode,
 
149
  ],
150
  "usage": None,
151
  }
152
+
153
+ def create_chat_completion_data(content: str, model: str, timestamp: int, finish_reason: Optional[str] = None):
154
+ return {
155
+ "id": f"chatcmpl-{uuid.uuid4()}",
156
+ "object": "chat.completion.chunk",
157
+ "created": timestamp,
158
+ "model": model,
159
+ "choices": [
160
+ {
161
+ "index": 0,
162
+ "delta": {"content": content, "role": "assistant"},
163
+ "finish_reason": finish_reason,
164
+ }
165
+ ],
166
+ "usage": None,
167
+ }
168
+
169
+ def message_to_dict(message, model_prefix: Optional[str] = None):
170
+ content = message.content if isinstance(message.content, str) else message.content[0]["text"]
171
+ if model_prefix:
172
+ content = f"{model_prefix} {content}"
173
+ if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
174
+ return {
175
+ "role": message.role,
176
+ "content": content,
177
+ "data": {
178
+ "imageBase64": message.content[1]["image_url"]["url"],
179
+ "fileText": "",
180
+ "title": "snapshot",
181
+ },
182
+ }
183
+ return {"role": message.role, "content": content}
184
+
185
+ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
186
+ if model_prefix and content.startswith(model_prefix):
187
+ logger.debug(f"Stripping prefix '{model_prefix}' from content.")
188
+ return content[len(model_prefix):].strip()
189
+ return content