Update app.py
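Reads the upstream base_url from the Authorization header instead of hard-coding it, drops the tiktoken-based max_tokens estimation and the Claude-specific message and streaming workarounds, and returns non-streaming responses via response.to_dict() rather than rebuilding the response dict field by field.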
app.py CHANGED
@@ -1,54 +1,8 @@
 from flask import Flask, request, Response, stream_with_context, jsonify
 from openai import OpenAI
 import json
-import tiktoken
-#import httpx
 
 app = Flask(__name__)
-
-# Specify your API key name in the request header
-#MY_API_KEY = "sk-gyxzhao"
-
-# Maximum context length for each model
-MODEL_MAX_CONTEXT_LENGTH = {
-    "gpt-4": 8192,
-    "gpt-4-0613": 8192,
-    "gpt-4o": 4096,
-    "gpt-4-turbo": 4096,
-    "claude-3-opus-20240229": 4096
-}
-
-def calculate_max_tokens(model_name, messages, requested_max_tokens):
-    if model_name in ["gpt-4", "gpt-4-0613"]:
-        try:
-            encoding = tiktoken.encoding_for_model(model_name)
-        except Exception as e:
-            print(f"Error getting encoding for model {model_name}: {e}")
-            encoding = tiktoken.get_encoding("cl100k_base")  # fall back to a generic encoding
-
-        max_context_length = MODEL_MAX_CONTEXT_LENGTH[model_name]
-
-        tokens_per_message = 3  # fixed token overhead per message (role + content + message boundary tokens)
-        tokens_per_name = 1  # extra tokens when a message includes a 'name' field
-        messages_length = 3  # initial token count
-
-        for message in messages:
-            messages_length += tokens_per_message
-            for key, value in message.items():
-                messages_length += len(encoding.encode(value))
-                if key == 'name':
-                    messages_length += tokens_per_name
-
-        #print(f"Message length in tokens: {messages_length}")  # print the token count for debugging
-
-        max_tokens = max_context_length - messages_length
-        if requested_max_tokens:
-            max_tokens = min(max_tokens, requested_max_tokens)
-
-        return max(100, max_tokens)  # ensure max_tokens is at least 100
-
-    else:
-        return MODEL_MAX_CONTEXT_LENGTH.get(model_name, 4096)  # for other models, return their max token count directly
 
 @app.route('/')
 def index():
@@ -63,6 +17,7 @@ def chat():
             return jsonify({"error": "Unauthorized"}), 401
 
         api_key = auth_header.split(" ")[1]
+        base_url = auth_header.split(" ")[2]
 
         data = request.json
         #print("Received data:", data)  # print the request body for debugging
@@ -74,7 +29,6 @@ def chat():
         model = data['model']
         messages = data['messages']
         temperature = data.get('temperature', 0.7)  # default 0.7
-        requested_max_tokens = data.get('max_tokens', MODEL_MAX_CONTEXT_LENGTH.get(model, 4096))
         #max_tokens = calculate_max_tokens(model, messages, requested_max_tokens)
         top_p = data.get('top_p', 1.0)  # default 1.0
         n = data.get('n', 1)  # default 1
@@ -82,40 +36,16 @@ def chat():
         functions = data.get('functions', None)  # Functions for function calling
         function_call = data.get('function_call', None)  # Specific function call request
 
-        # For Claude models, adjust the message format
-        system_message = None
-        if model.startswith("claude"):
-            messages = [msg for msg in messages if msg['role'] != 'system']
-            if 'system' in data:
-                system_message = data['system']
-
         # Create a per-request OpenAI client instance
         client = OpenAI(
            api_key=api_key,
-            base_url=
+            base_url=base_url,
         )
 
         # Handle the model response
         if stream:
             # Handle streaming responses
             def generate():
-                if model.startswith("claude"):
-                    response = client.chat.completions.create(
-                        model=model,
-                        messages=messages,
-                        temperature=temperature,
-                        #max_tokens=max_tokens,
-                        top_p=top_p,
-                        n=n,
-                        functions=functions,
-                        function_call=function_call,
-                        #system=system_message  # pass system_message as a top-level parameter
-                    )
-                    content = response.choices[0].message.content
-                    for i in range(0, len(content), 20):  # split into 20-character chunks
-                        chunk = content[i:i+20]
-                        yield f"data: {json.dumps({'choices': [{'delta': {'content': chunk}}]})}\n\n"
-                else:
                 response = client.chat.completions.create(
                     model=model,
                     messages=messages,
@@ -132,9 +62,8 @@ def chat():
 
             return Response(stream_with_context(generate()), content_type='text/event-stream')
         else:
-            #
-
-            response = client.chat.completions.create(
+            # Handle non-streaming responses
+            response = client.chat.completions.create(
                 model=model,
                 messages=messages,
                 temperature=temperature,
@@ -143,54 +72,8 @@ def chat():
                 n=n,
                 functions=functions,
                 function_call=function_call,
-
-
-            else:
-                response = client.chat.completions.create(
-                    model=model,
-                    messages=messages,
-                    temperature=temperature,
-                    #max_tokens=max_tokens,
-                    top_p=top_p,
-                    n=n,
-                    functions=functions,
-                    function_call=function_call,
-                )
-
-
-        # Print the response
-        #print("API response:", response)
-
-        # Convert the response to a dict
-        response_dict = {
-            "id": response.id,
-            "object": response.object,
-            "created": response.created,
-            "model": response.model,
-            "choices": [
-                {
-                    "message": {
-                        "role": choice.message.role,
-                        "content": choice.message.content
-                    },
-                    "index": choice.index,
-                    "finish_reason": choice.finish_reason,
-                    "logprobs": choice.logprobs.__dict__ if choice.logprobs else None  # convert ChoiceLogprobs to a dict
-                }
-                for choice in response.choices
-            ],
-            "usage": {
-                "prompt_tokens": response.usage.prompt_tokens,
-                "completion_tokens": response.usage.completion_tokens,
-                "total_tokens": response.usage.total_tokens
-            }
-        }
-
-        # Print the response dict as JSON
-        #print("Response dict:", json.dumps(response_dict, ensure_ascii=False, indent=2))
-
-        # Make sure the returned JSON is well-formed
-        return jsonify(response_dict), 200
+            )
+            return jsonify(response.to_dict())
 
     except Exception as e:
         print("Exception:", e)
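After this change a caller must pack both the upstream API key and the upstream base URL into the Authorization header, since the handler reads auth_header.split(" ")[1] and auth_header.split(" ")[2]. A minimal client sketch follows; only the header layout comes from app.py, while the host, port, and route are assumptions for illustration (the diff itself only shows the '/' route):

# Hypothetical client for the updated proxy. Only the Authorization layout
# ("Bearer <api_key> <base_url>") is taken from app.py; the host, port,
# and /v1/chat/completions route are assumptions.
import requests

headers = {
    # split(" ")[1] -> api_key, split(" ")[2] -> base_url for the OpenAI client
    "Authorization": "Bearer sk-your-key https://api.openai.com/v1",
}
payload = {
    "model": "gpt-4",
    "messages": [{"role": "user", "content": "Hello"}],
    "temperature": 0.7,
    "stream": False,  # the non-streaming path returns response.to_dict() as JSON
}
resp = requests.post(
    "http://localhost:7860/v1/chat/completions",  # hypothetical host and route
    headers=headers,
    json=payload,
    timeout=60,
)
print(resp.json()["choices"][0]["message"]["content"])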