Spaces:
Running
Running
Commit: c14055f
1 Parent(s): 08a2f2d
Upd debugs
Browse files
app.py
CHANGED
@@ -279,20 +279,22 @@ async def chat_endpoint(req: Request):
|
|
279 |
query = query_raw.strip() if isinstance(query_raw, str) else ""
|
280 |
lang = body.get("lang", "EN")
|
281 |
image_base64 = body.get("image_base64", None)
|
|
|
282 |
# LLM Only
|
283 |
-
if not
|
284 |
logger.info("[BOT] LLM scenario.")
|
285 |
start = time.time()
|
286 |
# If image is present → diagnose first
|
287 |
image_diagnosis = ""
|
288 |
# Img size safe processor
|
289 |
-
|
|
|
290 |
return JSONResponse({"response": "⚠️ Image too large. Please upload smaller images (<5MB)."})
|
291 |
# LLM+VLM
|
292 |
if image_base64:
|
293 |
logger.info("[BOT] VLM+LLM scenario.")
|
294 |
-
|
295 |
-
image_diagnosis = process_medical_image(image_base64,
|
296 |
answer = chatbot.chat(user_id, query, lang, image_diagnosis)
|
297 |
elapsed = time.time() - start
|
298 |
# Final
|
|
|
279 |
query = query_raw.strip() if isinstance(query_raw, str) else ""
|
280 |
lang = body.get("lang", "EN")
|
281 |
image_base64 = body.get("image_base64", None)
|
282 |
+
img_desc = body.get("img_desc", "Describe and investigate any clinical findings from this medical image.")
|
283 |
# LLM Only
|
284 |
+
if not image_base64:
|
285 |
logger.info("[BOT] LLM scenario.")
|
286 |
start = time.time()
|
287 |
# If image is present → diagnose first
|
288 |
image_diagnosis = ""
|
289 |
# Img size safe processor
|
290 |
+
safe_load = len(image_base64.encode("utf-8"))
|
291 |
+
if image_base64 and safe_load > 5_000_000:
|
292 |
return JSONResponse({"response": "⚠️ Image too large. Please upload smaller images (<5MB)."})
|
293 |
# LLM+VLM
|
294 |
if image_base64:
|
295 |
logger.info("[BOT] VLM+LLM scenario.")
|
296 |
+
logger.info(f"[VLM] Process medical image size: {safe_load}, desc: {img_desc}, {lang}.")
|
297 |
+
image_diagnosis = process_medical_image(image_base64, img_desc, lang)
|
298 |
answer = chatbot.chat(user_id, query, lang, image_diagnosis)
|
299 |
elapsed = time.time() - start
|
300 |
# Final
|
vlm.py
CHANGED
@@ -7,7 +7,7 @@ from translation import translate_query
|
|
7 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
8 |
client = InferenceClient(provider="auto", api_key=HF_TOKEN)
|
9 |
|
10 |
-
logger = logging.getLogger("
|
11 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s — %(name)s — %(levelname)s — %(message)s", force=True) # Change INFO to DEBUG for full-ctx JSON loader
|
12 |
|
13 |
def process_medical_image(base64_image: str, prompt: str = None, lang: str = "EN") -> str:
|
@@ -34,5 +34,5 @@ def process_medical_image(base64_image: str, prompt: str = None, lang: str = "EN
|
|
34 |
logger.info(f"[VLM] MedGemma returned {result}")
|
35 |
return result
|
36 |
except Exception as e:
|
37 |
-
logger.error(f"⚠️ Error from image diagnosis model: {e}")
|
38 |
-
return f"⚠️ Error from image diagnosis model: {e}"
|
|
|
7 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
8 |
client = InferenceClient(provider="auto", api_key=HF_TOKEN)
|
9 |
|
10 |
+
logger = logging.getLogger("vlm-agent")
|
11 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s — %(name)s — %(levelname)s — %(message)s", force=True) # Change INFO to DEBUG for full-ctx JSON loader
|
12 |
|
13 |
def process_medical_image(base64_image: str, prompt: str = None, lang: str = "EN") -> str:
|
|
|
34 |
logger.info(f"[VLM] MedGemma returned {result}")
|
35 |
return result
|
36 |
except Exception as e:
|
37 |
+
logger.error(f"[VLM] ⚠️ Error from image diagnosis model: {e}")
|
38 |
+
return f"[VLM] ⚠️ Error from image diagnosis model: {e}"
|