Spaces:
Running
Running
File size: 1,169 Bytes
6db39d6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 |
# vlm.py
import os
from huggingface_hub import InferenceClient
from translation import translate_query
# Initialise once at import time so every request reuses the same client.
# HF_TOKEN comes from the environment; os.getenv returns None when unset,
# in which case the client is created without credentials.
HF_TOKEN = os.getenv("HF_TOKEN")
client = InferenceClient(provider="auto", api_key=HF_TOKEN)
def process_medical_image(base64_image: str, prompt: str = None, lang: str = "EN") -> str:
    """
    Send a base64-encoded image plus a text prompt to the MedGemma model
    and return the model's textual findings.

    Parameters
    ----------
    base64_image : str
        Image bytes encoded as base64 (sent as a JPEG data URL below).
    prompt : str, optional
        Free-text instruction for the model. When omitted or empty, a
        generic clinical-findings prompt is used.
    lang : str
        Caller language code. For "VI"/"ZH" a supplied prompt is passed
        through translate_query first (presumably translating it to the
        model's working language — confirm against translation module).

    Returns
    -------
    str
        The stripped model reply, or a "⚠️ Error ..." message string if
        the API call fails (best-effort: never raises from the API path).
    """
    if not prompt:
        prompt = "Describe and investigate any clinical findings from this medical image."
    elif lang.upper() in {"VI", "ZH"}:
        # BUG FIX: the original read the undefined name `user_query`
        # (NameError) and discarded the translation; translate the actual
        # prompt in place instead. `prompt` is guaranteed truthy here, so
        # the original's redundant `prompt and` guard is dropped.
        prompt = translate_query(prompt, lang.lower())

    # Send over API
    try:
        response = client.chat.completions.create(
            model="google/medgemma-4b-it",
            messages=[{
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
                ],
            }],
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Surface the failure to the caller/UI as text rather than crashing.
        return f"⚠️ Error from image diagnosis model: {e}"
|