Create app.py
app.py
ADDED
@@ -0,0 +1,288 @@
import os
import sys
import math
from openai import OpenAI
import requests
import gradio as gr
import pandas as pd
import concurrent.futures
from datasets import Dataset
from tqdm import tqdm
from ragas import evaluate, SingleTurnSample
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from ragas.metrics import (
    ResponseRelevancy, LLMContextPrecisionWithReference, LLMContextRecall,
    ContextEntityRecall, Faithfulness, NoiseSensitivity, SemanticSimilarity, FactualCorrectness
)

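# Configuration summary for the environment variables used below:
#   GT_URL      - direct-download URL for the ground-truth CSV (e.g. a Google Drive export link)
#   G_SHEET_URL - endpoint (e.g. a Google Apps Script web app) that accepts the score payload as JSON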
# Set output encoding to UTF-8 (fixes Chinese text display in the logs)
sys.stdout.reconfigure(encoding="utf-8")

# Download the Ground Truth file from Google Drive
gt_url = os.environ.get("GT_URL")
gt_path = "tender_groundtruth.csv"

if gt_url and not os.path.exists(gt_path):
    print("嘗試下載 Ground Truth...")
    r = requests.get(gt_url)
    print("HTTP 狀態碼:", r.status_code)
    if r.status_code != 200:
        print("下載失敗內容預覽:", r.text[:500])
    else:
        with open(gt_path, "wb") as f:
            f.write(r.content)

# Log evaluation results to the lab's Google Sheet (bound to the Python TA Google account)
def log_to_google_sheet(question, answer, contexts, scores):
    url = os.environ.get("G_SHEET_URL")
    if not url:
        print("G_SHEET_URL 未設定,略過記錄")
        return
    try:
        payload = {
            "question": question,
            "answer": answer,
            "contexts": contexts,
            "faithfulness": scores.get("Faithfulness"),
            "answer_relevancy": scores.get("Answer Relevancy"),
            "semantic_similarity": scores.get("Semantic Similarity"),
            "context_precision": scores.get("Context Precision"),
            "context_recall": scores.get("Context Recall"),
            "context_entity_recall": scores.get("Context Entity Recall")
        }
        response = requests.post(url, json=payload)
        print("成功寫入 Google Sheet:", response.status_code)
    except Exception as e:
        print("寫入 Google Sheet 失敗:", str(e))

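# Validate the user's OpenAI API key with a minimal one-token chat completion.
# Returns None when the key works; on failure it returns (error DataFrame, None),
# the same shape as the Gradio outputs, so callers can pass the result straight through.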
def validate_openai_key(api_key):
    try:
        client = OpenAI(api_key=api_key)
        client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "hi"}],
            max_tokens=1
        )
        return None
    except Exception as e:
        err_msg = str(e)
        if "Incorrect API key provided" in err_msg:
            return pd.DataFrame([{"錯誤訊息": "您輸入的 OpenAI API Key 有誤,請確認是否貼錯、字數不符或格式異常。"}]), None
        elif "exceeded your current quota" in err_msg:
            return pd.DataFrame([{"錯誤訊息": "您的 OpenAI 帳戶額度已用盡,請前往帳戶頁面檢查餘額。"}]), None
        elif "Rate limit" in err_msg:
            return pd.DataFrame([{"錯誤訊息": "OpenAI 請求頻率過高,請稍後再試"}]), None
        else:
            return pd.DataFrame([{"錯誤訊息": f"API Key 錯誤:{err_msg}"}]), None

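# Main evaluation pipeline:
#   1. validate the API key
#   2. merge the uploaded answers with the ground-truth CSV on "Question"
#   3. score the merged rows with RAGAS in batches of 10
#   4. append an "Average" row and save the report as <input name>_result.csv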
def RAG_evaluation(uploaded_file, user_api_key):
    try:
        # Check whether the OpenAI API key is valid
        validation_result = validate_openai_key(user_api_key)
        if validation_result:
            return validation_result

        os.environ["OPENAI_API_KEY"] = user_api_key
        print("評估開始")

        if not os.path.exists(gt_path):
            print("找不到 Ground Truth!")
            return pd.DataFrame(), None

        gt_df = pd.read_csv(gt_path)
        df = pd.read_csv(uploaded_file.name, converters={"Context": eval})
        print(f"上傳檔案筆數:{len(df)},GT 檔案筆數:{len(gt_df)}")

        merged_df = pd.merge(df, gt_df[["Question", "Answer"]], on="Question", suffixes=("", "_GroundTruth"))
        merged_df = merged_df.rename(columns={"Answer_GroundTruth": "GroundTruth"})
        print(f"成功合併筆數:{len(merged_df)} / {len(df)}")
        if len(merged_df) < len(df):
            missing = df[~df["Question"].isin(merged_df["Question"])]
            print("未合併題目:", missing["Question"].tolist())
        if merged_df.empty:
            return pd.DataFrame([{"錯誤訊息": "合併後無資料,請確認題目與 GT 是否對應"}]), None

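        # gpt-4o-mini serves as the judge LLM; text-embedding-3-large backs the
        # embedding-based metrics (e.g. SemanticSimilarity, ResponseRelevancy).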
        llm_wrapper = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o-mini-2024-07-18"))
        embedding_wrapper = LangchainEmbeddingsWrapper(OpenAIEmbeddings(model="text-embedding-3-large"))

        batch_size = 10
        records = []
        for batch_start in tqdm(range(0, len(merged_df), batch_size), desc="RAGAS Batch Evaluating"):
            batch_df = merged_df.iloc[batch_start:batch_start + batch_size]

            samples = []
            for _, row in batch_df.iterrows():
                if not isinstance(row["Context"], list):
                    print(f"Context 非 list,跳過。值:{row['Question']}")
                    continue

                sample = SingleTurnSample(
                    user_input=row["Question"],
                    response=row["Answer"],
                    retrieved_contexts=row["Context"],
                    reference=row["GroundTruth"],
                )
                samples.append(sample)

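            # Score the whole batch with a single RAGAS evaluate() call; if the
            # batch fails, the except below logs it and moves on to the next batch.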
            try:
                dataset = Dataset.from_list([s.to_dict() for s in samples])
                result = evaluate(
                    dataset=dataset,
                    metrics=[
                        LLMContextPrecisionWithReference(),  # context precision
                        LLMContextRecall(),                  # context recall
                        ContextEntityRecall(),
                        # NoiseSensitivity(),
                        Faithfulness(),                      # faithfulness
                        ResponseRelevancy(),                 # answer relevancy
                        SemanticSimilarity(),                # semantic similarity
                        # FactualCorrectness()
                    ],
                    llm=llm_wrapper,
                    embeddings=embedding_wrapper,
                    show_progress=True
                )

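                # Convert the RAGAS result to a DataFrame and map its column names
                # back to the report's display names. Note: this assumes every row in
                # batch_df produced a sample, so positions line up with result_df.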
                result_df = result.to_pandas()

                for i, row in enumerate(result_df.itertuples()):
                    input_row = batch_df.iloc[i]
                    record = {
                        "Question": input_row["Question"],
                        "Faithfulness": getattr(row, "faithfulness", None),
                        "Answer Relevancy": getattr(row, "answer_relevancy", None),
                        "Semantic Similarity": getattr(row, "semantic_similarity", None),
                        # "Factual Correctness": getattr(row, "factual_correctness", None),
                        "Context Precision": getattr(row, "llm_context_precision_with_reference", None),
                        "Context Recall": getattr(row, "context_recall", None),
                        "Context Entity Recall": getattr(row, "context_entity_recall", None),
                        # "Noise Sensitivity": getattr(row, "noise_sensitivity_relevant", None)
                    }

                    for key in list(record.keys()):
                        val = record[key]
                        if isinstance(val, float) and not math.isfinite(val):
                            record[key] = ""

                    records.append(record)

                    log_to_google_sheet(
                        question=input_row["Question"],
                        answer=input_row["Answer"],
                        contexts=input_row["Context"],
                        scores=record
                    )

            except Exception as e:
                print(f"批次評估失敗(第 {batch_start+1} 筆起):{e}")
                continue

        score_df = pd.DataFrame(records).fillna("")
        print("完成評估筆數:", len(score_df))

        numeric_cols = score_df.drop(columns=["Question"]).select_dtypes(include="number")
        if not numeric_cols.empty:
            avg_row = numeric_cols.mean().to_dict()
            avg_row["Question"] = "Average"
            score_df = pd.concat([score_df, pd.DataFrame([avg_row])], ignore_index=True)

        original_name = os.path.basename(uploaded_file.name)
        filename = os.path.splitext(original_name)[0]
        output_path = f"{filename}_result.csv"
        score_df.to_csv(output_path, index=False, encoding="utf-8-sig")
        print("評估結果已儲存:", output_path)

        return score_df, output_path

    except Exception as e:
        print("評估函式整體錯誤:", str(e))
        return pd.DataFrame([{"錯誤訊息": f"系統錯誤:{str(e)}"}]), None

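# Illustrative layout of the uploaded Evaluation_Dataset.csv (example values only;
# the Context column holds a Python-style list literal, which is parsed with eval):
#
#   Question,Context,Answer
#   "What is the bid deadline?","['passage 1 ...', 'passage 2 ...']","The deadline is ..."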
# Validate the uploaded CSV, handle exceptions, then run the RAG evaluation
def check_csv_and_run(file, key):
    if file is None:
        return pd.DataFrame([{"錯誤訊息": "請上傳檔案!"}]), None

    if not key or key.strip() == "":
        return pd.DataFrame([{"錯誤訊息": "請輸入 OpenAI API Key"}]), None

    try:
        df = pd.read_csv(file.name, encoding="utf-8-sig")
        df.columns = [col.strip() for col in df.columns]

        required_columns = {"Question", "Context", "Answer"}
        actual_columns = set(df.columns)

        if actual_columns != required_columns:
            return pd.DataFrame([{"錯誤訊息": f"欄位錯誤:應包含欄位 {required_columns},實際為 {actual_columns}"}]), None

        if df.shape[0] == 0:
            return pd.DataFrame([{"錯誤訊息": "檔案中沒有資料列!"}]), None

        invalid_rows = df[df["Question"].notnull() & (df["Answer"].isnull() | df["Context"].isnull())]
        if len(invalid_rows) > 0:
            missing_questions = "\n".join(f"- {q}" for q in invalid_rows["Question"].tolist())
            return pd.DataFrame([{"錯誤訊息": f"發現 {len(invalid_rows)} 筆資料中 Answer 或 Context 為空:\n{missing_questions}"}]), None

        # Check that every Context cell parses to a Python list
        try:
            for i, val in df["Context"].dropna().items():
                if not isinstance(eval(val), list):
                    return pd.DataFrame([{"錯誤訊息": f"第 {i + 1} 筆 Context 欄格式錯誤,請確認其內容應為 list"}]), None
        except Exception as e:
            return pd.DataFrame([{"錯誤訊息": f"Context 欄格式解析錯誤,請確認其為有效的 list 格式,例如 ['A', 'B']:{str(e)}"}]), None

        # If the uploaded file passes all checks, run the evaluation
        try:
            return RAG_evaluation(file, key)
        except Exception as e:
            error_message = str(e)
            return pd.DataFrame([{"錯誤訊息": f"系統錯誤:{error_message}"}]), None
    except Exception as e:
        return pd.DataFrame([{"錯誤訊息": f"評估失敗:{str(e)}"}]), None

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("""
## 📐 RAG系統評估工具 (分流B)

### 📄 使用說明
請上傳您 RAG 系統產出的結果檔案(需包含欄位:Question、Context、Answer),並填入您的 OpenAI API Key,以進行評估。
#### ⏳ 完整評估通常需耗時 1 小時以上。若無即時回應,請耐心等候,系統並未當機,謝謝您的理解。
🚦 注意:本工具部署於 Hugging Face Public Space,若同時有多位使用者使用,系統會將您的評估請求排入佇列。
為避免長時間等待,建議您**先僅送出 1 筆資料進行測試**,若進度條顯示之預估等待時間超過 2 小時(7000 秒以上),可能是其他使用者正在使用。
本頁為分流 B,您可以考慮改用其他分流或稍後再試,感謝您的耐心與配合!
- 🔁 [主頁面 (Main)](https://huggingface.co/spaces/KSLab/RAG_Evaluator)
- 🔁 [分流 A](https://huggingface.co/spaces/KSLab/RAG_Evaluator_A)
- 🔁 [分流 C](https://huggingface.co/spaces/KSLab/RAG_Evaluator_C)
""")

    file_input = gr.File(label="上傳 Evaluation_Dataset.csv")
    api_key_input = gr.Textbox(label="OpenAI API Key", type="password")
    submit_btn = gr.Button("開始評估")

    result_output = gr.Dataframe(label="評估結果")
    download_link = gr.File(label="下載評估結果(CSV)")

    # FAQ section
    gr.Markdown("""
---
### ❓ 常見問題 & 解答
**Q: 什麼是「指令集」?**
A: 「指令集」是我們用來描述老師在課堂上所設計的各種學習活動操作流程。在與教學系統互動時,老師通常會透過一系列結構化的指令來引導學生完成任務,因此我們將這些可重複使用的操作流程統稱為「指令集」。
指令集也如同RESTful API一樣,我們有先盡力的與老師們溝通他們的需求,不過這些需求都只能視為一個草案,最終仍需要仰賴得標業者與老師們收斂,並且確定最終的版本來加以實作。
""")

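    # Note: wrapped_fn below is kept as-is but unused; the button is wired to
    # check_csv_and_run, which validates the CSV before calling RAG_evaluation.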
    def wrapped_fn(file, key):
        return RAG_evaluation(file, key)

    submit_btn.click(
        fn=check_csv_and_run,
        inputs=[file_input, api_key_input],
        outputs=[result_output, download_link],
    )

demo.launch()
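# Local usage sketch (assuming the same environment variables are available):
#   GT_URL=... G_SHEET_URL=... python app.py
# Gradio serves the interface on http://127.0.0.1:7860 by default.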