Upload 2 files
- Dockerfile +13 -0
- app.py +769 -0
Dockerfile
ADDED
@@ -0,0 +1,13 @@
FROM python:3.10-slim

WORKDIR /app

RUN pip install --no-cache-dir flask flask-cors requests python-dotenv werkzeug

COPY . .

ENV PORT=5200
ENV MAX_WORKERS=20
EXPOSE 5200

CMD ["python", "app.py"]
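Note that app.py reads PORT, MAX_WORKERS, API_KEY, and RAYCAST_TOKEN from the environment (via python-dotenv), so the PORT/MAX_WORKERS defaults baked in above, and the credentials, can be overridden at run time, e.g. docker run -p 5200:5200 -e API_KEY=<your-key> raycast-proxy (the raycast-proxy image tag is assumed here for illustration).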
app.py
ADDED
@@ -0,0 +1,769 @@
import json
import os
import time
import uuid
import hashlib
import base64
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor

import requests
from flask import Flask, request, jsonify, Response, stream_with_context
from flask_cors import CORS
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# ==================== Configuration ====================
class Config:
    """Global configuration management"""

    # Server configuration
    PORT = int(os.getenv('PORT', 5200))
    MAX_WORKERS = int(os.getenv('MAX_WORKERS', 20))

    # Authentication configuration
    API_KEY = os.getenv('API_KEY', 'sk-123456')
    RAYCAST_TOKEN = os.getenv('RAYCAST_TOKEN', 'rca_9455afe4694f6d63194263810fa7e93a659e21b0eb2e384e18b26dec53c0ee21')

    # Raycast API configuration
    RAYCAST_BASE_URL = 'https://backend.raycast.com/api/v1'
    RAYCAST_CHAT_URL = f'{RAYCAST_BASE_URL}/ai/chat_completions'
    RAYCAST_FILES_URL = f'{RAYCAST_BASE_URL}/ai/files/'

    # Raycast request headers
    RAYCAST_HEADERS = {
        'Content-Type': 'application/json',
        'accept-language': 'zh-CN,zh-Hans;q=0.9',
        'x-raycast-deviceid': 'c86ec3d4b2c9a66de6d1a19fc5bada76fc15af8f117dc1b69ba993391f0ad531',
        'accept-encoding': 'gzip, deflate, br',
        'user-agent': 'Raycast/1.0.4/747 (iOS Version 18.2.1 (Build 22C161))',
        'Cookie': '__raycast_session=4eb4e28abc9196e140b1980c79b75cdc'
    }

    # Default system preferences
    DEFAULT_SYSTEM_INSTRUCTIONS = f"""<user-preferences>
The user has the following system preferences:
- Locale: en-CN
- Timezone: Asia/Shanghai
- Current Date: {datetime.now().strftime('%Y-%m-%d')}
- Unit Currency: ¥
- Unit Temperature: °C
- Unit Length: m
- Unit Mass: kg
- Decimal Separator: .
- Grouping Separator: ,
Use the system preferences to format your answers accordingly.
</user-preferences>"""

    @classmethod
    def get_raycast_headers(cls, include_auth=True):
        """Build the request headers for the Raycast backend"""
        headers = cls.RAYCAST_HEADERS.copy()
        if include_auth:
            headers['authorization'] = f'Bearer {cls.RAYCAST_TOKEN}'
        return headers

# Configure the Flask application
app = Flask(__name__)
CORS(app)

# Create the thread pool
executor = ThreadPoolExecutor(max_workers=Config.MAX_WORKERS)

# ==================== Authentication decorator ====================
def require_auth(f):
    """Decorator that enforces Bearer-token authentication"""
    def decorated_function(*args, **kwargs):
        auth_header = request.headers.get('Authorization')
        if not auth_header:
            return jsonify({
                'error': {
                    'message': 'Missing authorization header',
                    'type': 'authentication_error',
                    'code': 'missing_authorization'
                }
            }), 401

        # Check the Bearer token format
        if not auth_header.startswith('Bearer '):
            return jsonify({
                'error': {
                    'message': 'Invalid authorization format',
                    'type': 'authentication_error',
                    'code': 'invalid_authorization_format'
                }
            }), 401

        token = auth_header[7:]
        if token != Config.API_KEY:
            return jsonify({
                'error': {
                    'message': 'Authentication failed',
                    'type': 'authentication_error',
                    'code': 'invalid_api_key'
                }
            }), 401

        return f(*args, **kwargs)
    decorated_function.__name__ = f.__name__
    return decorated_function

# ==================== Utilities ====================
class UtilsHelper:
    @staticmethod
    def generate_uuid():
        return str(uuid.uuid4())

    @staticmethod
    def get_current_timestamp():
        return int(time.time())

    @staticmethod
    def generate_md5(data):
        """Return the base64-encoded MD5 digest (Content-MD5 format)"""
        if isinstance(data, str):
            data = data.encode('utf-8')
        return base64.b64encode(hashlib.md5(data).digest()).decode('utf-8')

    @staticmethod
    def is_search_model(model):
        return model.endswith('-search')

    @staticmethod
    def get_base_model(model):
        """Strip the '-search' suffix, if present"""
        return model[:-7] if model.endswith('-search') else model

# ==================== Model mapping ====================
class ModelMapper:
    BASE_MODELS = {
        "ray1": "raycast",
        "ray1-mini": "raycast",
        "gpt-4.1": "openai",
        "gpt-4.1-mini": "openai",
        "gpt-4.1-nano": "openai",
        "gpt-4": "openai",
        "gpt-4-turbo": "openai",
        "gpt-4o": "openai",
        "gpt-4o-mini": "openai",
        "o3": "openai_o1",
        "o4-mini": "openai_o1",
        "o1-mini": "openai_o1",
        "o1-2024-12-17": "openai_o1",
        "o3-mini": "openai_o1",
        "claude-3-5-haiku-latest": "anthropic",
        "claude-3-5-sonnet-latest": "anthropic",
        "claude-3-7-sonnet-latest": "anthropic",
        "claude-3-7-sonnet-latest-reasoning": "anthropic",
        "claude-3-opus-20240229": "anthropic",
        "claude-sonnet-4-20250514": "anthropic",
        "claude-opus-4-20250514": "anthropic",
        "claude-sonnet-4-20250514-reasoning": "anthropic",
        "claude-opus-4-20250514-reasoning": "anthropic",
        "sonar": "perplexity",
        "sonar-pro": "perplexity",
        "sonar-reasoning": "perplexity",
        "sonar-reasoning-pro": "perplexity",
        "meta-llama/llama-4-scout-17b-16e-instruct": "groq",
        "llama-3.3-70b-versatile": "groq",
        "llama-3.1-8b-instant": "groq",
        "llama3-70b-8192": "groq",
        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo": "together",
        "open-mistral-nemo": "mistral",
        "mistral-large-latest": "mistral",
        "mistral-medium-latest": "mistral",
        "mistral-small-latest": "mistral",
        "codestral-latest": "mistral",
        "deepseek-r1-distill-llama-70b": "groq",
        "gemini-2.5-pro-preview-06-05": "google",
        "gemini-1.5-flash": "google",
        "gemini-2.5-flash-preview-04-17": "google",
        "gemini-2.0-flash": "google",
        "gemini-2.0-flash-thinking-exp-01-21": "google",
        "deepseek-ai/DeepSeek-R1": "together",
        "deepseek-ai/DeepSeek-V3": "together",
        "grok-3-fast-beta": "xai",
        "grok-3-mini-fast-beta": "xai",
        "grok-2-latest": "xai"
    }

    # Build the full model map, including the search-enabled variants
    @classmethod
    def get_model_map(cls):
        model_map = cls.BASE_MODELS.copy()
        # Add a search-enabled version of every base model
        for model in cls.BASE_MODELS.keys():
            model_map[f"{model}-search"] = cls.BASE_MODELS[model]
        return model_map

    @classmethod
    def get_provider(cls, model):
        base_model = UtilsHelper.get_base_model(model)
        return cls.get_model_map().get(base_model, 'google')

    @classmethod
    def get_actual_model(cls, model):
        base_model = UtilsHelper.get_base_model(model)
        provider = cls.get_provider(model)

        if provider == 'raycast':
            return 'gemini-2.5-flash-preview-04-17'
        else:
            return base_model

    @classmethod
    def get_all_models(cls):
        return list(cls.get_model_map().keys())

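# For illustration, a few lookups the mapping above produces:
#   ModelMapper.get_provider('gpt-4o')        -> 'openai'
#   ModelMapper.get_provider('gpt-4o-search') -> 'openai' (same base model)
#   ModelMapper.get_actual_model('ray1')      -> 'gemini-2.5-flash-preview-04-17'
#   ModelMapper.get_provider('unknown-model') -> 'google' (the fallback)
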
# ==================== Tools ====================
class ToolsManager:
    @staticmethod
    def get_tools(use_search=False):
        if not use_search:
            return []

        return [
            {
                "name": "search_images",
                "type": "remote_tool"
            },
            {
                "name": "web_search",
                "type": "remote_tool"
            }
        ]

# ==================== File upload ====================
class FileUploader:
    @classmethod
    def upload_file(cls, file_data):
        try:
            filename = file_data['filename']
            content = file_data['content']
            content_type = file_data['contentType']

            buffer = base64.b64decode(content)
            byte_size = len(buffer)
            checksum = UtilsHelper.generate_md5(buffer)

            # Create the file metadata
            create_file_payload = {
                'blob': {
                    'byte_size': byte_size,
                    'checksum': checksum,
                    'content_type': content_type,
                    'filename': filename
                }
            }

            headers = Config.get_raycast_headers()
            headers['x-raycast-timestamp'] = str(UtilsHelper.get_current_timestamp())
            headers['x-request-id'] = UtilsHelper.generate_uuid().upper()

            create_response = requests.post(
                Config.RAYCAST_FILES_URL,
                headers=headers,
                json=create_file_payload,
                timeout=30
            )

            if not create_response.ok:
                raise Exception(f'File metadata creation failed: {create_response.status_code}')

            create_result = create_response.json()
            upload_url = create_result['direct_upload']['url']
            file_id = create_result['id']

            # Upload the file bytes
            upload_headers = {
                'Content-Type': content_type,
                'Content-MD5': checksum,
                'Content-Length': str(byte_size),
                'Content-Disposition': f'inline; filename="{filename}"; filename*=UTF-8\'\'{filename}',
                'Upload-Complete': '?1'
            }

            upload_response = requests.put(
                upload_url,
                headers=upload_headers,
                data=buffer,
                timeout=60
            )

            if not upload_response.ok:
                raise Exception(f'File upload failed: {upload_response.status_code}')

            return {
                'id': file_id,
                'type': 'file'
            }

        except Exception as error:
            print(f'File upload error: {error}')
            raise error

    @classmethod
    def extract_files_from_openai(cls, messages):
        files = []

        for message in messages:
            if message.get('role') == 'user' and isinstance(message.get('content'), list):
                for content in message['content']:
                    if content.get('type') == 'image_url' and content.get('image_url'):
                        url = content['image_url']['url']
                        if url.startswith('data:'):
                            # Handle base64-encoded images
                            header, data = url.split(',', 1)
                            content_type = header.split(':')[1].split(';')[0] if ':' in header else 'image/jpeg'

                            files.append({
                                'filename': f'image_{UtilsHelper.generate_uuid()}.{content_type.split("/")[1]}',
                                'content': data,
                                'contentType': content_type
                            })

        return files

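# Note: as implemented above, uploading is a two-step handshake -- a POST of
# the blob metadata to /ai/files/ returns a direct_upload URL, and the raw
# bytes are then PUT there with a matching Content-MD5 checksum.
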
# ==================== Message conversion ====================
class MessageConverter:
    @classmethod
    def merge_consecutive_messages(cls, messages):
        """Merge consecutive messages that share the same role"""
        if not messages:
            return messages

        merged_messages = []
        current_message = None

        for message in messages:
            role = message.get('role')
            content = message.get('content', '')

            # Handle content supplied as a list of parts
            if isinstance(content, list):
                content = ''.join([
                    c.get('text', '') for c in content
                    if c.get('type') == 'text'
                ])

            if current_message is None:
                # First message
                current_message = {
                    'role': role,
                    'content': content
                }
            elif current_message['role'] == role:
                # Same role: merge the contents
                current_message['content'] += '\n' + content
            else:
                # Different role: save the current message and start a new one
                merged_messages.append(current_message)
                current_message = {
                    'role': role,
                    'content': content
                }

        # Append the final message
        if current_message:
            merged_messages.append(current_message)

        return merged_messages

    @classmethod
    def process_system_messages(cls, messages):
        # First merge consecutive messages with the same role
        merged_messages = cls.merge_consecutive_messages(messages)

        processed_messages = []
        additional_system_instructions = ''
        system_collection_stopped = False

        for message in merged_messages:
            if message.get('role') == 'system':
                if not system_collection_stopped:
                    # Leading system messages are collected into additional_system_instructions
                    if additional_system_instructions:
                        additional_system_instructions += '\n' + message['content']
                    else:
                        additional_system_instructions = message['content']
                else:
                    # Later system messages are converted into user messages
                    processed_messages.append({
                        'author': 'user',
                        'content': {
                            'references': [],
                            'text': message['content']
                        }
                    })
            else:
                # A non-system message stops the collection of system messages
                system_collection_stopped = True

                processed_message = {
                    'author': 'user' if message.get('role') == 'user' else 'assistant',
                    'content': {
                        'references': [],
                        'text': message['content']
                    }
                }

                processed_messages.append(processed_message)

        return processed_messages, additional_system_instructions

    @classmethod
    def convert_to_raycast_format(cls, openai_request):
        processed_messages, additional_system_instructions = cls.process_system_messages(
            openai_request['messages']
        )

        # Handle file uploads
        files = FileUploader.extract_files_from_openai(openai_request['messages'])
        attachments = []

        for file in files:
            try:
                uploaded_file = FileUploader.upload_file(file)
                attachments.append(uploaded_file)
            except Exception as error:
                print(f'File upload failed: {error}')

        # If there are attachments, add them to the last user message
        if attachments and processed_messages:
            last_message = processed_messages[-1]
            if last_message['author'] == 'user':
                last_message['content']['attachments'] = attachments

        actual_model = ModelMapper.get_actual_model(openai_request['model'])
        provider = ModelMapper.get_provider(openai_request['model'])
        use_search = UtilsHelper.is_search_model(openai_request['model'])

        raycast_request = {
            'additional_system_instructions': additional_system_instructions or Config.DEFAULT_SYSTEM_INSTRUCTIONS,
            'debug': False,
            'locale': 'en_CN',
            'message_id': UtilsHelper.generate_uuid(),
            'messages': processed_messages,
            'model': actual_model,
            'provider': 'google' if provider == 'raycast' else provider,
            'source': 'ai_chat',
            'tools': ToolsManager.get_tools(use_search)
        }

        return raycast_request

# ==================== Response processing ====================
class ResponseProcessor:
    def __init__(self):
        self.is_thinking = False
        self.thinking_content = ''

    def process_raycast_chunk(self, chunk):
        content = ''

        # Handle reasoning (thinking) content
        if chunk.get('reasoning'):
            if not self.is_thinking:
                # Start of a thinking block
                self.is_thinking = True
                content += '<think>'
            content += chunk['reasoning']
            self.thinking_content += chunk['reasoning']

        # Handle plain text content
        if chunk.get('text'):
            if self.is_thinking:
                # End of the thinking block
                content += '</think>'
                self.is_thinking = False
                self.thinking_content = ''
            content += chunk['text']

        return content

    def convert_to_openai_format(self, raycast_chunk, model, is_stream=False):
        content = self.process_raycast_chunk(raycast_chunk)

        if is_stream:
            return {
                'id': 'chatcmpl-' + UtilsHelper.generate_uuid(),
                'object': 'chat.completion.chunk',
                'created': UtilsHelper.get_current_timestamp(),
                'model': model,
                'choices': [{
                    'index': 0,
                    'delta': {
                        'content': content
                    },
                    'finish_reason': None
                }]
            }
        else:
            return {
                'id': 'chatcmpl-' + UtilsHelper.generate_uuid(),
                'object': 'chat.completion',
                'created': UtilsHelper.get_current_timestamp(),
                'model': model,
                'choices': [{
                    'index': 0,
                    'message': {
                        'role': 'assistant',
                        'content': content
                    },
                    'finish_reason': 'stop'
                }],
                'usage': {
                    'prompt_tokens': 0,
                    'completion_tokens': 0,
                    'total_tokens': 0
                }
            }

    def finish_thinking(self):
        """Close an unterminated <think> block, if any"""
        if self.is_thinking:
            self.is_thinking = False
            return '</think>'
        return ''

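# The processor above wraps any `reasoning` tokens in <think>...</think>
# markers: the opening tag is emitted when reasoning first appears and the
# closing tag when regular text resumes, so OpenAI-style clients see the
# model's reasoning inline in the content stream.
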
# ==================== API service ====================
class RaycastAPIService:
    @classmethod
    def send_request(cls, raycast_request):
        headers = Config.get_raycast_headers()
        headers['x-raycast-timestamp'] = str(UtilsHelper.get_current_timestamp())

        print(f'Sending to Raycast: {json.dumps(raycast_request, indent=2, ensure_ascii=False)}')

        response = requests.post(
            Config.RAYCAST_CHAT_URL,
            headers=headers,
            json=raycast_request,
            stream=True,
            timeout=120
        )

        if not response.ok:
            error_text = response.text
            print(f'Raycast API error response: {error_text}')
            raise Exception(f'Raycast API returned an error: {response.status_code} {response.reason}')

        return response

# ==================== Request handlers ====================
def handle_chat_completion(request_data):
    try:
        print(f'Incoming request: {json.dumps(request_data, indent=2, ensure_ascii=False)}')

        # Convert the request format
        raycast_request = MessageConverter.convert_to_raycast_format(request_data)

        # Send the request to Raycast
        response = RaycastAPIService.send_request(raycast_request)

        return response, request_data

    except Exception as error:
        print(f'Proxy error: {error}')
        raise error

def process_stream_response(response, request_data):
    processor = ResponseProcessor()

    def generate():
        try:
            buffer = ''
            for chunk in response.iter_lines():
                chunk = chunk.decode("utf-8").strip()
                if chunk:
                    buffer += chunk + '\n'
                    lines = buffer.split('\n')
                    buffer = lines.pop() if lines else ''

                    for line in lines:
                        if line.strip():
                            try:
                                if line.startswith('data: '):
                                    data = line[6:]
                                    if data == '[DONE]':
                                        # Close a dangling <think> tag if necessary
                                        finish_content = processor.finish_thinking()
                                        if finish_content:
                                            finish_response = processor.convert_to_openai_format(
                                                {'text': finish_content}, request_data['model'], True
                                            )
                                            yield f"data: {json.dumps(finish_response)}\n\n"
                                        yield 'data: [DONE]\n\n'
                                        return

                                    parsed = json.loads(data)
                                    openai_response = processor.convert_to_openai_format(
                                        parsed, request_data['model'], True
                                    )
                                    yield f"data: {json.dumps(openai_response)}\n\n"
                            except Exception as err:
                                print(f'Error parsing stream chunk: {err}, raw line: {line}')

            yield 'data: [DONE]\n\n'

        except Exception as err:
            print(f'Stream response error: {err}')
            yield f'data: {json.dumps({"error": "stream processing error"})}\n\n'
        finally:
            response.close()

    return generate()

def process_non_stream_response(response, request_data):
    processor = ResponseProcessor()
    full_content = ''

    try:
        buffer = ''
        for chunk in response.iter_lines():
            chunk = chunk.decode("utf-8").strip()
            if chunk:
                buffer += chunk + '\n'
                lines = buffer.split('\n')
                buffer = lines.pop() if lines else ''

                for line in lines:
                    if line.strip():
                        try:
                            if line.startswith('data: '):
                                data = line[6:]
                                if data == '[DONE]':
                                    break  # End of stream

                                parsed = json.loads(data)
                                content = processor.process_raycast_chunk(parsed)
                                full_content += content
                        except Exception as err:
                            print(f'Error parsing non-stream chunk: {err}, raw line: {line}')

        # Make sure any open <think> tag is closed
        finish_content = processor.finish_thinking()
        full_content += finish_content

        return {
            'id': 'chatcmpl-' + UtilsHelper.generate_uuid(),
            'object': 'chat.completion',
            'created': UtilsHelper.get_current_timestamp(),
            'model': request_data['model'],
            'choices': [{
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': full_content
                },
                'finish_reason': 'stop'
            }],
            'usage': {
                'prompt_tokens': 0,
                'completion_tokens': 0,
                'total_tokens': 0
            }
        }

    except Exception as err:
        print(f'Non-stream response error: {err}')
        raise err
    finally:
        response.close()

# ==================== Routes ====================

@app.route('/v1/chat/completions', methods=['POST'])
@require_auth
def chat_completions():
    try:
        request_data = request.get_json()
        if not request_data:
            return jsonify({
                'error': {
                    'message': 'Empty request body',
                    'type': 'invalid_request',
                    'code': 'invalid_request'
                }
            }), 400

        is_stream = request_data.get('stream', False)

        # Handle the request in the thread pool
        future = executor.submit(handle_chat_completion, request_data)
        response, req_data = future.result()

        if is_stream:
            return Response(
                stream_with_context(process_stream_response(response, req_data)),
                content_type='text/event-stream',
                headers={
                    'Cache-Control': 'no-cache',
                    'Connection': 'keep-alive',
                    'Access-Control-Allow-Origin': '*'
                }
            )
        else:
            future = executor.submit(process_non_stream_response, response, req_data)
            result = future.result()
            return jsonify(result)

    except Exception as error:
        return jsonify({
            'error': {
                'message': str(error) or 'Internal server error',
                'type': 'internal_error',
                'code': 'internal_error'
            }
        }), 500

@app.route('/v1/models', methods=['GET'])
def list_models():
    models = [
        {
            'id': model,
            'object': 'model',
            'created': UtilsHelper.get_current_timestamp(),
            'owned_by': 'raycast-proxy'
        }
        for model in ModelMapper.get_all_models()
    ]

    return jsonify({
        'object': 'list',
        'data': models
    })

@app.route('/health', methods=['GET'])
def health_check():
    return jsonify({
        'status': 'ok',
        'timestamp': datetime.now().isoformat(),
        'models_count': len(ModelMapper.get_all_models()),
        'config': {
            'port': Config.PORT,
            'max_workers': Config.MAX_WORKERS,
            'auth_required': bool(Config.API_KEY)
        }
    })

@app.route('/', methods=['OPTIONS'])
@app.route('/v1/chat/completions', methods=['OPTIONS'])
@app.route('/v1/models', methods=['OPTIONS'])
def handle_options():
    return '', 200

if __name__ == '__main__':
    print(f'🚀 Raycast proxy server running on port {Config.PORT}')
    print(f'🔗 OpenAI-compatible endpoint: http://localhost:{Config.PORT}/v1/chat/completions')
    print(f'📜 Model list: http://localhost:{Config.PORT}/v1/models')
    print(f'⚡ Max worker threads: {Config.MAX_WORKERS}')

    # Run the built-in WSGI server with threading enabled
    app.run(
        host='0.0.0.0',
        port=Config.PORT,
        debug=False,
        threaded=True,
        processes=1
    )
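
For reference, a minimal client sketch against this proxy (not part of the commit; it assumes the server is reachable at localhost:5200 with the default API_KEY of sk-123456, and that gpt-4o is one of the ids served by /v1/models):

import json
import requests

resp = requests.post(
    'http://localhost:5200/v1/chat/completions',
    headers={'Authorization': 'Bearer sk-123456'},
    json={
        'model': 'gpt-4o',
        'stream': True,
        'messages': [{'role': 'user', 'content': 'Hello!'}],
    },
    stream=True,
)
resp.raise_for_status()

# The proxy emits OpenAI-style SSE chunks terminated by `data: [DONE]`
for raw in resp.iter_lines():
    line = raw.decode('utf-8')
    if not line.startswith('data: '):
        continue
    data = line[6:]
    if data == '[DONE]':
        break
    chunk = json.loads(data)
    print(chunk['choices'][0]['delta'].get('content', ''), end='', flush=True)
print()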