ChienChung committed
Commit d8607c2 · verified · 1 parent: cb598d9

Update app.py

Files changed (1)
  1. app.py +13 -33
app.py CHANGED
@@ -8,23 +8,7 @@ import re
 import requests
 import transformers
 import chardet
-#import deepeval
-try:
-    import deepeval
-    from deepeval import evaluate
-    from deepeval.metrics import AnswerRelevancyMetric
-    from deepeval.test_case import LLMTestCase
-    DEEPEVAL_AVAILABLE = True
-except Exception as e:
-    print(f"DeepEval import failed: {e}, continuing without evaluation features")
-    DEEPEVAL_AVAILABLE = False
-    # Create fallback functions
-    def evaluate(*args, **kwargs):
-        return []
-    class AnswerRelevancyMetric:
-        def __init__(self, *args, **kwargs): pass
-    class LLMTestCase:
-        def __init__(self, *args, **kwargs): pass
+import deepeval
 import difflib
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.models.llama.configuration_llama import LlamaConfig
@@ -71,19 +55,15 @@ tempfile.tempdir = "/tmp"
 # Apply the DeepEval monkey-patch here to avoid changing the working directory globally
 original_evaluate = deepeval.evaluate
 
-# Apply the DeepEval monkey-patch here to avoid changing the working directory globally
-if DEEPEVAL_AVAILABLE:
-    original_evaluate = deepeval.evaluate
-
-    def patched_evaluate(*args, **kwargs):
-        current_dir = os.getcwd()
-        try:
-            os.chdir("/tmp")
-            return original_evaluate(*args, **kwargs)
-        finally:
-            os.chdir(current_dir)
-
-    deepeval.evaluate = patched_evaluate
+def patched_evaluate(*args, **kwargs):
+    current_dir = os.getcwd()
+    try:
+        os.chdir("/tmp")
+        return original_evaluate(*args, **kwargs)
+    finally:
+        os.chdir(current_dir)
+
+deepeval.evaluate = patched_evaluate
 
 
 SHOW_EVAL = os.getenv("SHOW_EVAL", "false").lower() == "true"
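The patch wraps deepeval.evaluate so that anything DeepEval writes to the current working directory lands in /tmp instead. A minimal sketch of the same idea as a reusable context manager, assuming original_evaluate has been captured as in the hunk above; the helper name working_directory is hypothetical and not part of this commit:

import contextlib
import os

@contextlib.contextmanager
def working_directory(path):
    # Temporarily switch the process working directory; restore it even on error.
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)

def patched_evaluate(*args, **kwargs):
    # Equivalent to the patch above: run DeepEval with /tmp as the cwd.
    with working_directory("/tmp"):
        return original_evaluate(*args, **kwargs)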
@@ -169,7 +149,7 @@ print(f"Using device => {device}")
 
 hf_token = os.environ.get("HF_TOKEN")
 openai_api_key = os.environ.get("OPENAI_API_KEY")
-model_id = "meta-llama/Llama-3.2-3B-Instruct"
+model_id = "ChienChung/my-llama-1b"
 
 config_path = hf_hub_download(
     repo_id=model_id,
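The config_path call is cut off by this hunk. A hedged sketch of a plausible completion, assuming the standard config.json filename and huggingface_hub's token parameter (neither appears in this diff):

from huggingface_hub import hf_hub_download
from transformers.models.llama.configuration_llama import LlamaConfig

config_path = hf_hub_download(
    repo_id=model_id,        # "ChienChung/my-llama-1b" after this commit
    filename="config.json",  # assumption: the diff truncates before this argument
    token=hf_token,          # assumption: only needed for gated or private repos
)
config = LlamaConfig.from_json_file(config_path)  # parse the downloaded JSON config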
@@ -294,7 +274,7 @@ def rag_llama_qa(query):
 def rag_gpt4_qa(query):
     raw_answer = qa_gpt.run(query)
 
-    if SHOW_EVAL and DEEPEVAL_AVAILABLE:
+    if SHOW_EVAL:
         try:
             top_docs = retriever.get_relevant_documents(query)
             test_case = LLMTestCase(
@@ -336,7 +316,7 @@ def upload_and_chat(file, query):
         chain_type_kwargs={"prompt": custom_prompt}
     )
     raw_answer = qa_temp.run(query)
-    if SHOW_EVAL and DEEPEVAL_AVAILABLE:
+    if SHOW_EVAL:
         try:
             test_case = LLMTestCase(
                 input=query,
@@ -1232,7 +1212,7 @@ def multi_agent_chat_advanced(query: str, file=None) -> str:
     #answer = session_graph_chain(query)["answer"]
 
     # ✅ DeepEval evaluation is triggered only for the Tab1 document QA case
-    if SHOW_EVAL and DEEPEVAL_AVAILABLE:
+    if SHOW_EVAL:
         try:
             test_case = LLMTestCase(
                 input=query,
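All three SHOW_EVAL blocks share the same shape, but each hunk is cut off inside the LLMTestCase constructor. A hedged sketch of what such a block plausibly does, based on DeepEval's documented API; the helper name, the LLMTestCase fields beyond input, and the metric choice are assumptions, not the file's actual code:

import deepeval
from deepeval.metrics import AnswerRelevancyMetric
from deepeval.test_case import LLMTestCase

def run_answer_relevancy_eval(query, raw_answer, top_docs):
    # Hypothetical helper mirroring the guarded blocks in app.py.
    if SHOW_EVAL:
        try:
            test_case = LLMTestCase(
                input=query,
                actual_output=raw_answer,
                retrieval_context=[doc.page_content for doc in top_docs],
            )
            metric = AnswerRelevancyMetric()  # assumption: default threshold
            # Call through the module attribute so the /tmp monkey-patch applies.
            deepeval.evaluate(test_cases=[test_case], metrics=[metric])
        except Exception as e:
            # Best-effort evaluation: never let it break the QA response.
            print(f"DeepEval evaluation failed: {e}")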
 