Zenith Wang committed · Commit 42ceccb · 1 Parent(s): 68ecd47

Fix message submission logic - properly pass message to API
app.py CHANGED

@@ -32,10 +32,12 @@ def process_message(message, history, system_prompt, temperature, max_tokens, top_p):
     print(f"[DEBUG] Processing message: {message[:100] if message else 'None'}...")

     if not message:
+        print("[DEBUG] No message provided, skipping")
         yield history
         return

     if not STEP_API_KEY:
+        print("[DEBUG] No API key configured")
         history.append([message, "❌ API key not configured. Please add STEP_API_KEY in Settings."])
         yield history
         return
@@ -58,6 +60,7 @@ def process_message(message, history, system_prompt, temperature, max_tokens, top_p):
                 image_content = image_path.split(',')[1]
             else:
                 image_content = image_to_base64(image_path)
+            print(f"[DEBUG] Image processed successfully")
         except Exception as e:
             print(f"[DEBUG] Failed to process image: {e}")

@@ -94,11 +97,14 @@ def process_message(message, history, system_prompt, temperature, max_tokens, top_p):
     messages.append({"role": "user", "content": text_content})

     print(f"[DEBUG] Sending {len(messages)} messages to API")
+    print(f"[DEBUG] Last message: {messages[-1]}")

     # Create the client and call the API
     try:
         client = OpenAI(api_key=STEP_API_KEY, base_url=BASE_URL)
+        print("[DEBUG] Client created successfully")

+        print("[DEBUG] Calling API...")
         response = client.chat.completions.create(
             model="step-3",
             messages=messages,
@@ -108,22 +114,33 @@ def process_message(message, history, system_prompt, temperature, max_tokens, top_p):
             stream=True
         )

+        print("[DEBUG] API call successful, processing stream...")
+
         # Stream the output
         full_response = ""
+        chunk_count = 0
         for chunk in response:
+            chunk_count += 1
             if chunk.choices and len(chunk.choices) > 0:
                 delta = chunk.choices[0].delta
                 if hasattr(delta, 'content') and delta.content:
                     full_response += delta.content
                     history[-1][1] = full_response
+                    if chunk_count % 10 == 0:
+                        print(f"[DEBUG] Received {chunk_count} chunks, {len(full_response)} chars")
                     yield history

+        print(f"[DEBUG] Stream complete. Total chunks: {chunk_count}, Total chars: {len(full_response)}")
+
         if not full_response:
+            print("[DEBUG] No response content received")
             history[-1][1] = "⚠️ No response received from API"
             yield history

     except Exception as e:
         print(f"[DEBUG] API error: {e}")
+        import traceback
+        traceback.print_exc()
         history[-1][1] = f"❌ Error: {str(e)}"
         yield history

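
For reference, the streaming pattern these hunks instrument can be reduced to a standalone sketch. The prompt and the BASE_URL value below are placeholders, since the real constants are defined elsewhere in app.py and do not appear in this diff; the client calls mirror the ones shown above.

# Minimal sketch of the streaming pattern used in process_message.
# BASE_URL below is a placeholder; the real value is defined elsewhere
# in app.py and is not part of this diff.
import os
from openai import OpenAI

STEP_API_KEY = os.environ.get("STEP_API_KEY")
BASE_URL = "https://example.invalid/v1"  # placeholder OpenAI-compatible endpoint

client = OpenAI(api_key=STEP_API_KEY, base_url=BASE_URL)
response = client.chat.completions.create(
    model="step-3",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)

full_response = ""
chunk_count = 0
for chunk in response:
    chunk_count += 1
    # Each chunk carries an incremental delta; skip chunks with no content.
    if chunk.choices and chunk.choices[0].delta.content:
        full_response += chunk.choices[0].delta.content
        if chunk_count % 10 == 0:
            print(f"[DEBUG] Received {chunk_count} chunks, {len(full_response)} chars")

print(full_response)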

@@ -215,9 +232,24 @@ with gr.Blocks(title="Step-3 Chat", theme=gr.themes.Soft()) as demo:
             label=""
         )

-    #
+    # Event handler functions
     def user_submit(message, history):
-
+        """Handle a message submitted by the user"""
+        print(f"[DEBUG] user_submit called with message: {message[:50] if message else 'None'}...")
+        if message:
+            # Save the message content for later processing
+            return gr.update(value=""), history, message
+        return gr.update(value=message), history, message
+
+    def bot_response(history, saved_message, system_prompt, temperature, max_tokens, top_p):
+        """Generate the bot response"""
+        print(f"[DEBUG] bot_response called with saved_message: {saved_message[:50] if saved_message else 'None'}...")
+        if saved_message:
+            # Process the message with the generator
+            for updated_history in process_message(saved_message, history, system_prompt, temperature, max_tokens, top_p):
+                yield updated_history
+        else:
+            yield history

     def undo_last(history):
         if history:
@@ -227,29 +259,34 @@ with gr.Blocks(title="Step-3 Chat", theme=gr.themes.Soft()) as demo:
     def retry_last(history):
         if history and history[-1][0]:
             last_message = history[-1][0]
-
+            new_history = history[:-1]
+            return new_history, last_message
         return history, ""

-    #
+    # Create a hidden component to store the message
+    saved_msg = gr.State("")
+
+    # Submit message - Enter key
     msg.submit(
         user_submit,
         [msg, chatbot],
-        [msg, chatbot],
+        [msg, chatbot, saved_msg],
         queue=False
     ).then(
-
-        [
+        bot_response,
+        [chatbot, saved_msg, system_prompt, temperature, max_tokens, top_p],
         chatbot
     )

+    # Submit message - Send button
     submit_btn.click(
         user_submit,
         [msg, chatbot],
-        [msg, chatbot],
+        [msg, chatbot, saved_msg],
         queue=False
     ).then(
-
-        [
+        bot_response,
+        [chatbot, saved_msg, system_prompt, temperature, max_tokens, top_p],
         chatbot
     )

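
The chain above is the substance of the fix: the raw message is captured into a gr.State during the first step, and the chained .then() step reads it back instead of the already-cleared textbox. A minimal runnable sketch of that pattern follows; the stub generator, its canned reply, and the bare component constructors are illustrative stand-ins for process_message and the real UI, while the event wiring follows the diff.

# Minimal sketch of the submit -> .then() chain added in this commit.
# echo_response is a stand-in for process_message; the real app streams
# from the step-3 API instead. Assumes the list-of-pairs chatbot format
# that the diff's history[-1][1] updates imply.
import time
import gradio as gr

def user_submit(message, history):
    # Clear the textbox and stash the raw message in State for the next step.
    if message:
        return gr.update(value=""), history, message
    return gr.update(value=message), history, message

def echo_response(history, saved_message):
    # Stand-in for process_message: append the turn, then stream a canned reply.
    if not saved_message:
        yield history
        return
    history = history + [[saved_message, ""]]
    reply = f"You said: {saved_message}"
    for i in range(1, len(reply) + 1):
        history[-1][1] = reply[:i]
        time.sleep(0.02)
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    saved_msg = gr.State("")  # hidden holder for the submitted message

    msg.submit(
        user_submit,
        [msg, chatbot],
        [msg, chatbot, saved_msg],
        queue=False,
    ).then(
        echo_response,
        [chatbot, saved_msg],
        chatbot,
    )

if __name__ == "__main__":
    demo.launch()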

@@ -271,10 +308,10 @@ with gr.Blocks(title="Step-3 Chat", theme=gr.themes.Soft()) as demo:
     retry_btn.click(
         retry_last,
         chatbot,
-        [chatbot,
+        [chatbot, saved_msg]
     ).then(
-
-        [
+        bot_response,
+        [chatbot, saved_msg, system_prompt, temperature, max_tokens, top_p],
         chatbot
     )

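
Read together, the wiring explains the commit message: user_submit empties the Textbox in the same step that writes the raw text into saved_msg, so when bot_response runs it takes the message from State rather than from the now-empty input box and passes it to the API intact. retry_last reuses the same channel, popping the last exchange and writing its user message back into saved_msg so that regeneration flows through the identical bot_response path.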