eagle0504 committed on
Commit
1b16dac
·
verified ·
1 Parent(s): 6f928c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -17
app.py CHANGED
@@ -45,18 +45,38 @@ def call_chatgpt(prompt: str) -> str:
45
  return ans
46
 
47
 
48
- def ai_judge(prompt: str) -> float:
49
- """
50
- Uses the ChatGPT function to identify whether the content can answer the question
51
 
52
- Args:
53
- prompt: A string that represents the prompt
54
 
55
- Returns:
56
- float: A score
57
- """
 
 
58
 
59
- return call_chatgpt(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
 
62
  def query(payload: Dict[str, Any]) -> Dict[str, Any]:
@@ -251,18 +271,18 @@ if prompt := st.chat_input("Tell me about YSA"):
251
  for i in range(final_ref.shape[0]):
252
  this_quest = question
253
  this_content = final_ref["answers"][i]
254
- prompt_for_ai_judge = f"""
255
- The user asked a question: {question}
256
 
257
- We have found this content: {this_content}
258
 
259
- From 0 to 10, rate how well the content answer the user's question.
260
 
261
- Only produce a number from 0 to 10 while 10 being the best at answer user's question.
262
 
263
- If the content is a list of questions or not related to the user's question or it says inference endpoint is down, then you should say 0, because it does not answer user's question.
264
- """
265
- this_score = ai_judge(prompt_for_ai_judge)
266
  independent_ai_judge_score.append(this_score)
267
 
268
  final_ref["ai_judge"] = independent_ai_judge_score
 
45
  return ans
46
 
47
 
48
+ # def ai_judge(prompt: str) -> float:
49
+ # """
50
+ # Uses the ChatGPT function to identify whether the content can answer the question
51
 
52
+ # Args:
53
+ # prompt: A string that represents the prompt
54
 
55
+ # Returns:
56
+ # float: A score
57
+ # """
58
+
59
+ # return call_chatgpt(prompt)
60
 
61
+
62
def ai_judge(sentence1: str, sentence2: str) -> float:
    """Score the semantic similarity of two sentences via the HF Inference API.

    Uses the ``sentence-transformers/msmarco-distilbert-base-tas-b`` model's
    sentence-similarity endpoint, comparing ``sentence1`` (the source) against
    ``sentence2``.

    Args:
        sentence1: Source sentence (e.g. the user's question).
        sentence2: Candidate sentence to score against the source.

    Returns:
        float: Similarity score for the pair; 0.0 if the endpoint returns an
        error payload instead of a score list (treated as "does not answer").

    Raises:
        KeyError: If the ``HF_TOKEN`` environment variable is not set.
    """
    HF_TOKEN = os.environ["HF_TOKEN"]
    API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b"
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}

    def helper(payload):
        # timeout so a stalled inference endpoint cannot hang the app forever
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        return response.json()

    # BUG FIX: original diff was missing the closing ")" of this call,
    # which made the whole module a SyntaxError.
    data = helper(
        {
            "inputs": {
                "source_sentence": sentence1,
                "sentences": [sentence2],
            }
        }
    )
    # On success the endpoint returns a list of scores (one per candidate).
    # Error responses are dicts (e.g. {"error": ...}); score those 0.0, matching
    # the old prompt's rule that a down endpoint "does not answer the question".
    if isinstance(data, list) and data:
        return data[0]
    return 0.0
80
 
81
 
82
  def query(payload: Dict[str, Any]) -> Dict[str, Any]:
 
271
  for i in range(final_ref.shape[0]):
272
  this_quest = question
273
  this_content = final_ref["answers"][i]
274
+ # prompt_for_ai_judge = f"""
275
+ # The user asked a question: {question}
276
 
277
+ # We have found this content: {this_content}
278
 
279
+ # From 0 to 10, rate how well the content answer the user's question.
280
 
281
+ # Only produce a number from 0 to 10 while 10 being the best at answer user's question.
282
 
283
+ # If the content is a list of questions or not related to the user's question or it says inference endpoint is down, then you should say 0, because it does not answer user's question.
284
+ # """
285
+ this_score = ai_judge(question, this_content)
286
  independent_ai_judge_score.append(this_score)
287
 
288
  final_ref["ai_judge"] = independent_ai_judge_score