Update app.py
app.py CHANGED
@@ -19,14 +19,21 @@ tokenizer.set_chat_template(tokenizer.chat_template)
 
 
 def generate_response(prompt):
-
-
-
+    try:
+        generated = pipe.generate([prompt], max_length=1024)
+        tokenpersec=f'{generated.perf_metrics.get_throughput().mean:.2f}'
+        match = re.search(r"<think>(.*?)</think>(.*)", generated, re.DOTALL)
 
-
-
-
-
+        if match:
+            thinking = match.group(1).strip()
+            content = match.group(2).strip()
+        else:
+            thinking = "The model did not provide a thinking process"
+            content = generated  # or generated.text, depending on the attributes of the generated object
+
+        return tokenpersec, thinking, content
+    except Exception as e:
+        return "An error occurred", "An error occurred", f"An error occurred while generating the response: {e}"
 
 
 # Build the Gradio interface
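For reference, the updated function can be read as the standalone sketch below. It assumes `pipe` is the text-generation pipeline created earlier in app.py (not shown in this diff) and that the result of `pipe.generate` can be converted to a plain string before the regex match; the committed code passes the result object directly, and its own comment notes that `generated.text` may be needed instead, depending on the result object's attributes. Treat this as an illustration of the change, not the exact committed code.

import re

# `pipe` is assumed to be the text-generation pipeline built earlier in
# app.py; max_length and the <think> tag format follow the committed code.
def generate_response(prompt):
    try:
        generated = pipe.generate([prompt], max_length=1024)
        # Mean generation throughput, formatted to two decimal places.
        tokenpersec = f'{generated.perf_metrics.get_throughput().mean:.2f}'

        # re.search needs a string, so convert the result object first;
        # depending on the pipeline this might instead be generated.texts[0]
        # or generated.text.
        text = str(generated)

        # Split the reply into the <think>...</think> reasoning block and
        # the visible answer that follows it.
        match = re.search(r"<think>(.*?)</think>(.*)", text, re.DOTALL)
        if match:
            thinking = match.group(1).strip()
            content = match.group(2).strip()
        else:
            thinking = "The model did not provide a thinking process"
            content = text

        return tokenpersec, thinking, content
    except Exception as e:
        # Surface the error in all three output fields.
        return "An error occurred", "An error occurred", f"An error occurred while generating the response: {e}"

The three return values (throughput, thinking, content) are presumably bound to three output components in the Gradio interface built below this function in app.py.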