ChienChung committed (verified)
Commit 5f2337f · Parent(s): def9f84

Update app.py

Files changed (1)
  1. app.py  +20 -4
app.py CHANGED
@@ -8,7 +8,23 @@ import re
 import requests
 import transformers
 import chardet
-import deepeval
+#import deepeval
+try:
+    import deepeval
+    from deepeval import evaluate
+    from deepeval.metrics import AnswerRelevancyMetric
+    from deepeval.test_case import LLMTestCase
+    DEEPEVAL_AVAILABLE = True
+except Exception as e:
+    print(f"DeepEval import failed: {e}, continuing without evaluation features")
+    DEEPEVAL_AVAILABLE = False
+    # Create fallback stubs
+    def evaluate(*args, **kwargs):
+        return []
+    class AnswerRelevancyMetric:
+        def __init__(self, *args, **kwargs): pass
+    class LLMTestCase:
+        def __init__(self, *args, **kwargs): pass
 import difflib
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.models.llama.configuration_llama import LlamaConfig
@@ -274,7 +290,7 @@ def rag_llama_qa(query):
 def rag_gpt4_qa(query):
     raw_answer = qa_gpt.run(query)
 
-    if SHOW_EVAL:
+    if SHOW_EVAL and DEEPEVAL_AVAILABLE:
         try:
             top_docs = retriever.get_relevant_documents(query)
             test_case = LLMTestCase(
@@ -316,7 +332,7 @@ def upload_and_chat(file, query):
         chain_type_kwargs={"prompt": custom_prompt}
     )
     raw_answer = qa_temp.run(query)
-    if SHOW_EVAL:
+    if SHOW_EVAL and DEEPEVAL_AVAILABLE:
         try:
             test_case = LLMTestCase(
                 input=query,
@@ -1212,7 +1228,7 @@ def multi_agent_chat_advanced(query: str, file=None) -> str:
     #answer = session_graph_chain(query)["answer"]
 
     # ✅ DeepEval evaluation is triggered only for Tab1 document QA
-    if SHOW_EVAL:
+    if SHOW_EVAL and DEEPEVAL_AVAILABLE:
         try:
             test_case = LLMTestCase(
                 input=query,
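
For context, a minimal sketch of how the new SHOW_EVAL and DEEPEVAL_AVAILABLE guard is meant to behave at a call site. This is not part of the commit: the helper name maybe_evaluate, the variables answer and retrieval_context, and the 0.5 threshold are assumptions for illustration; the deepeval calls shown (LLMTestCase, AnswerRelevancyMetric, metric.measure) are standard deepeval APIs.

# Hypothetical sketch, not from app.py: evaluation runs only when both the
# SHOW_EVAL flag is on and the deepeval import above actually succeeded.
def maybe_evaluate(query, answer, retrieval_context):
    if not (SHOW_EVAL and DEEPEVAL_AVAILABLE):
        return None  # skip evaluation entirely when deepeval is unavailable
    try:
        test_case = LLMTestCase(
            input=query,
            actual_output=answer,
            retrieval_context=retrieval_context,
        )
        metric = AnswerRelevancyMetric(threshold=0.5)  # threshold is an assumed value
        metric.measure(test_case)
        return metric.score
    except Exception as e:
        print(f"DeepEval evaluation failed: {e}")
        return None

With the added "and DEEPEVAL_AVAILABLE" check, the fallback stubs defined in the except branch are rarely exercised; they mainly keep module-level references to evaluate, AnswerRelevancyMetric, and LLMTestCase from raising NameError when deepeval cannot be imported.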