Commit 25a6ec3 · Parent: b27a873
Upd dynamic language
app.py
CHANGED
@@ -133,7 +133,7 @@ def load_faiss_index():
     return index
 
 # ✅ Retrieve Medical Info (256,916 scenario)
-def retrieve_medical_info(query, k=5, min_sim=0.8): # Min similarity between query and kb is to be 80%
+def retrieve_medical_info(query, k=5, min_sim=0.9): # Min similarity between query and kb is to be 90%
     global index
     index = load_faiss_index()
     if index is None:
@@ -180,7 +180,7 @@ def retrieve_medical_info(query, k=5, min_sim=0.8): # Min similarity between query and kb is to be 80%
 
 
 # ✅ Retrieve Sym-Dia Info (4,962 scenario)
-def retrieve_diagnosis_from_symptoms(symptom_text, top_k=5, min_sim=0.
+def retrieve_diagnosis_from_symptoms(symptom_text, top_k=5, min_sim=0.5):
     global SYMPTOM_VECTORS, SYMPTOM_DOCS
     # Lazy load
     if SYMPTOM_VECTORS is None:
@@ -243,21 +243,21 @@ class RAGMedicalChatbot:
         # Append image diagnosis from VLM
         if image_diagnosis:
             parts.append(
-                "
+                "A user medical image is diagnosed by our VLM agent:\n"
                 f"{image_diagnosis}\n\n"
                 "➡️ Please incorporate the above findings in your response if medically relevant.\n\n"
             )
         # Historical chat retrieval case
         if context:
-            parts.append("Relevant context from prior conversation:\n" + "\n".join(context))
+            parts.append("Relevant chat history context from prior conversation:\n" + "\n".join(context))
         # Load up guideline
         if knowledge_base:
-            parts.append(f"
+            parts.append(f"Example Q&A medical scenario knowledge-base: {knowledge_base}")
         # Symptom-Diagnosis prediction RAG
         if diagnosis_guides:
-            parts.append("Symptom-based diagnosis guidance:\n" + "\n".join(diagnosis_guides))
-        parts.append(f"
-        parts.append(f"Language: {lang}")
+            parts.append("Symptom-based diagnosis guidance (if applicable):\n" + "\n".join(diagnosis_guides))
+        parts.append(f"User's question: {user_query}")
+        parts.append(f"Language to generate answer: {lang}")
         prompt = "\n\n".join(parts)
         logger.info(f"[LLM] Question query in `prompt`: {prompt}") # Debug out checking RAG on kb and history
         response = gemini_flash_completion(prompt, model=self.model_name, temperature=0.7)
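The first two hunks retune the similarity cutoffs: the large 256,916-scenario index now requires cosine similarity ≥ 0.9, while the smaller symptom-diagnosis index accepts matches down to 0.5. A minimal sketch of how such a cutoff typically filters FAISS results, assuming an inner-product index over L2-normalized embeddings (the Space's actual index type and encoder are not shown in this diff):

```python
import faiss
import numpy as np

def retrieve_with_threshold(index, docs, query_vec, k=5, min_sim=0.9):
    """Return up to k docs whose cosine similarity to the query meets min_sim.

    Assumes `index` is a faiss.IndexFlatIP built over L2-normalized vectors,
    so inner-product scores equal cosine similarities.
    """
    q = np.asarray([query_vec], dtype="float32")
    faiss.normalize_L2(q)                    # unit-length query => IP == cosine
    sims, ids = index.search(q, k)           # top-k scores and row ids
    return [docs[i] for sim, i in zip(sims[0], ids[0])
            if i != -1 and sim >= min_sim]   # drop padding hits and weak matches
```

At a 0.9 cutoff this often returns an empty list, so callers should treat an empty result as "no guideline retrieved" rather than a failure.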
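The third hunk is the "dynamic language" change itself: each evidence source gets an explicit label, and the user's question plus the target answer language are now always appended last. Condensed as a standalone function for reference (the signature is an assumption; in the repo this logic lives inside a RAGMedicalChatbot method):

```python
def build_prompt(user_query, lang, image_diagnosis=None, context=None,
                 knowledge_base=None, diagnosis_guides=None):
    """Assemble the RAG prompt section by section, in the new diff order."""
    parts = []
    if image_diagnosis:                      # VLM findings, if an image was sent
        parts.append(
            "A user medical image is diagnosed by our VLM agent:\n"
            f"{image_diagnosis}\n\n"
            "➡️ Please incorporate the above findings in your response if medically relevant.\n\n"
        )
    if context:                              # retrieved chat history
        parts.append("Relevant chat history context from prior conversation:\n" + "\n".join(context))
    if knowledge_base:                       # Q&A scenario guideline hits
        parts.append(f"Example Q&A medical scenario knowledge-base: {knowledge_base}")
    if diagnosis_guides:                     # symptom-to-diagnosis RAG hits
        parts.append("Symptom-based diagnosis guidance (if applicable):\n" + "\n".join(diagnosis_guides))
    parts.append(f"User's question: {user_query}")        # always present
    parts.append(f"Language to generate answer: {lang}")  # steers output language
    return "\n\n".join(parts)
```

For example, `build_prompt("Tôi bị sốt và ho", lang="VI")` produces a prompt whose final line instructs the model to answer in Vietnamese, replacing the old bare `Language: {lang}` hint.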
memory.py
CHANGED
@@ -135,16 +135,17 @@ class MemoryManager:
         if not response: return []
         # Gemini instruction
         instructions = []
-        if lang.upper() != "EN":
-            instructions.append("- Translate the response to English.")
+        # if lang.upper() != "EN":
+        #     instructions.append("- Translate the response to English.")
         instructions.append("- Break the translated (or original) text into semantically distinct parts, grouped by medical topic or symptom.")
         instructions.append("- For each part, generate a clear, concise summary. The summary may vary in length depending on the complexity of the topic — do not omit key clinical instructions.")
         instructions.append("- At the start of each part, write `Topic: <one line description>`.")
         instructions.append("- Separate each part using three dashes `---` on a new line.")
+        # if lang.upper() != "EN":
+        #     instructions.append(f"Below is the user-provided medical response written in `{lang}`")
         # Gemini prompt
         prompt = f"""
 You are a medical assistant helping organize and condense a clinical response.
-Below is the user-provided medical response written in `{lang}`:
 ------------------------
 {response}
 ------------------------
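The instructions still ask Gemini to open each part with `Topic: <one line description>` and to separate parts with `---`; with the translation step commented out, the parts now stay in the user's language. A sketch of the parsing this contract implies downstream (a hypothetical helper, not code from this commit):

```python
import re

def split_summary_parts(gemini_output: str) -> list[tuple[str, str]]:
    """Split Gemini's output into (topic, summary) memory chunks.

    Relies on the prompt contract above: each part opens with `Topic: ...`
    and parts are separated by `---` on its own line.
    """
    chunks = []
    for part in re.split(r"^\s*---\s*$", gemini_output, flags=re.MULTILINE):
        part = part.strip()
        if not part:
            continue
        head, _, body = part.partition("\n")
        topic = head.removeprefix("Topic:").strip()
        chunks.append((topic, body.strip()))
    return chunks
```

Since summaries are no longer forced into English, whatever embeds these chunks for later retrieval presumably needs a multilingual encoder, which fits the commit's "dynamic language" direction.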