Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -44,6 +44,20 @@ def call_chatgpt(prompt: str) -> str:
     return ans
 
 
+def ai_judge(prompt: str) -> float:
+    """
+    Uses the ChatGPT function to identify whether the content can answer the question.
+
+    Args:
+        prompt: A string that represents the prompt.
+
+    Returns:
+        float: A score.
+    """
+
+    return call_chatgpt(prompt)
+
+
 ## rag strategy 1
 # file_names = [f"output_files/file_{i}.txt" for i in range(131)]
 # # file_names = [f"output_files_large/file_{i}.txt" for i in range(1310)]
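A side note on the new helper, not part of the commit: ai_judge is annotated to return a float, but it passes the prompt straight to call_chatgpt, which the hunk header shows returns a str. Below is a minimal sketch of a hypothetical parser that would turn the raw judge reply into the promised number; the name parse_judge_score and the regex are assumptions, not code from app.py.

# A sketch only, not code from app.py: pull the first number out of the judge's
# text reply and clamp it to the 0-10 range the prompt asks for.
import re

def parse_judge_score(raw_reply: str) -> float:
    """Extract a 0-10 score from the judge model's raw text reply."""
    match = re.search(r"\d+(?:\.\d+)?", raw_reply)
    if match is None:
        return 0.0  # no number in the reply; treat as the lowest score
    return min(max(float(match.group()), 0.0), 10.0)

Inside the scoring loop in the next hunk, this_score = parse_judge_score(ai_judge(prompt_for_ai_judge)) would then keep the new ai_judge column numeric rather than a column of raw model strings.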
@@ -163,6 +177,24 @@ if prompt := st.chat_input("Tell me about YSA"):
     ref_from_db_search = ref["answers"]
     final_ref = ref
 
+    independent_ai_judge_score = []
+    for i in range(final_ref.shape[0]):
+        this_quest = question
+        this_content = final_ref["answers"][i]
+        prompt_for_ai_judge = f"""
+            The user asked a question: {question}
+
+            We have found this content: {this_content}
+
+            From 0 to 10, rate how well the content addresses the user's question.
+
+            Only produce a number from 0 to 10.
+        """
+        this_score = ai_judge(prompt_for_ai_judge)
+        independent_ai_judge_score.append(this_score)
+
+    final_ref["ai_judge"] = independent_ai_judge_score
+
     engineered_prompt = f"""
         Based on the context: {ref_from_db_search},
         answer the user question: {question}.
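The hunk above scores each retrieved row independently and stores the results in a new ai_judge column, but as far as this diff shows, engineered_prompt is still built from the unfiltered ref_from_db_search. Below is a minimal sketch of how the scores could be used to re-rank and trim the context before prompting, assuming final_ref is a pandas DataFrame (which .shape[0] and the column assignment suggest); rerank_by_judge and min_score are hypothetical names, not part of the commit.

# A sketch only, assuming final_ref is a pandas DataFrame: use the new
# "ai_judge" column to keep the best-rated rows and put them first.
import pandas as pd

def rerank_by_judge(final_ref: pd.DataFrame, min_score: float = 5.0) -> pd.DataFrame:
    scored = final_ref.copy()
    # Judge replies may arrive as strings; coerce them to numbers, treating
    # unparsable replies as 0.
    scored["ai_judge"] = pd.to_numeric(scored["ai_judge"], errors="coerce").fillna(0.0)
    scored = scored[scored["ai_judge"] >= min_score]           # drop weak matches
    return scored.sort_values("ai_judge", ascending=False)     # best context first

Something like ref_from_db_search = rerank_by_judge(final_ref)["answers"] could then feed only the judged context into engineered_prompt.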