import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import spaces
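# `spaces` is Hugging Face's ZeroGPU helper: functions decorated with
# @spaces.GPU are allocated a GPU for the duration of each call, which is
# why the models below are loaded lazily rather than at import time.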
# Model configuration
LLAMA_MODEL = "meta-llama/Llama-2-7b-chat-hf"
MEDITRON_MODEL = "epfl-llm/meditron-7b"
SYSTEM_PROMPT = """You are a professional virtual doctor. Your goal is to collect detailed information about the user's health condition, symptoms, medical history, medications, lifestyle, and other relevant data.
Ask 1-2 follow-up questions at a time to gather more details about:
- Detailed description of symptoms
- Duration (when did it start?)
- Severity (scale of 1-10)
- Aggravating or alleviating factors
- Related symptoms
- Medical history
- Current medications and allergies
After collecting sufficient information (4-5 exchanges), summarize your findings and suggest when the user should seek professional care. Do NOT make specific diagnoses or recommend specific treatments.
Respond empathetically and clearly. Always be professional and thorough."""
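# The Meditron prompt below uses ChatML-style markers (<|im_start|>/<|im_end|>)
# to delimit the system, user, and assistant turns; the collected patient
# details are interpolated into the user turn via {patient_info}.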
MEDITRON_PROMPT = """<|im_start|>system
You are a specialized medical assistant focusing ONLY on suggesting over-the-counter medicines and home remedies based on patient information.
Based on the following patient information, provide ONLY:
1. One specific over-the-counter medicine with proper adult dosing instructions
2. One practical home remedy that might help
3. Clear guidance on when to seek professional medical care
Be concise, practical, and focus only on general symptom relief. Do not diagnose. Include a disclaimer that you are not a licensed medical professional.
<|im_end|>
<|im_start|>user
Patient information: {patient_info}
<|im_end|>
<|im_start|>assistant
"""
# Global model handles (loaded lazily on first request)
llama_model = None
llama_tokenizer = None
meditron_model = None
meditron_tokenizer = None
def build_llama2_prompt(system_prompt, history, user_input):
    """Format the conversation history and user input for Llama-2 chat models."""
    prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
    # Add conversation history
    for user_msg, assistant_msg in history:
        prompt += f"{user_msg} [/INST] {assistant_msg} </s><s>[INST] "
    # Add the current user input
    prompt += f"{user_input} [/INST] "
    return prompt
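# For example, with one prior exchange (u1, a1) and a new message u2, the
# prompt produced above is laid out as:
#   <s>[INST] <<SYS>>\n{system}\n<</SYS>>\n\nu1 [/INST] a1 </s><s>[INST] u2 [/INST]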
def load_models_if_needed():
    """Load both models on first use; subsequent calls are no-ops."""
    global llama_model, llama_tokenizer, meditron_model, meditron_tokenizer
    if llama_model is None:
        print("Loading Llama-2 model...")
        llama_tokenizer = AutoTokenizer.from_pretrained(LLAMA_MODEL)
        llama_model = AutoModelForCausalLM.from_pretrained(
            LLAMA_MODEL,
            torch_dtype=torch.float16,
            device_map="auto"
        )
        print("Llama-2 model loaded successfully!")
    if meditron_model is None:
        print("Loading Meditron model...")
        meditron_tokenizer = AutoTokenizer.from_pretrained(MEDITRON_MODEL)
        meditron_model = AutoModelForCausalLM.from_pretrained(
            MEDITRON_MODEL,
            torch_dtype=torch.float16,
            device_map="auto"
        )
        print("Meditron model loaded successfully!")
def get_meditron_suggestions(patient_info):
    """Use the Meditron model to generate medicine and remedy suggestions."""
    load_models_if_needed()  # Ensure models are loaded
    prompt = MEDITRON_PROMPT.format(patient_info=patient_info)
    inputs = meditron_tokenizer(prompt, return_tensors="pt")
    # Move inputs to the same device as the model (a no-op on CPU)
    inputs = {k: v.to(meditron_model.device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = meditron_model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=256,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=meditron_tokenizer.eos_token_id
        )
    # Decode only the newly generated tokens, skipping the prompt
    suggestion = meditron_tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return suggestion
@spaces.GPU  # Request a GPU for this call on ZeroGPU Spaces
def generate_response(message, history):
    """Generate a response using both models."""
    # Load models on first use
    load_models_if_needed()
    # Derive per-session state from the chat history instead of module-level
    # globals, so concurrent sessions don't share counters
    conversation_turns = len(history) + 1
    patient_data = [user_msg for user_msg, _ in history] + [message]
    # Build the prompt with proper Llama-2 chat formatting
    prompt = build_llama2_prompt(SYSTEM_PROMPT, history, message)
    # After 4 exchanges, ask the model to summarize; edit only the final
    # instruction block (a plain str.replace would hit every "[/INST] " in
    # the history and leak the instruction into the decoded response)
    if conversation_turns >= 4:
        prompt = prompt[:-len("[/INST] ")] + (
            "Now summarize what you've learned and suggest when professional "
            "care may be needed. [/INST] "
        )
    inputs = llama_tokenizer(prompt, return_tensors="pt")
    # Move inputs to the same device as the model (a no-op on CPU)
    inputs = {k: v.to(llama_model.device) for k, v in inputs.items()}
    # Generate the Llama-2 response
    with torch.no_grad():
        outputs = llama_model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=llama_tokenizer.eos_token_id
        )
    # Decode and extract Llama-2's response (the text after the final [/INST])
    full_response = llama_tokenizer.decode(outputs[0], skip_special_tokens=False)
    llama_response = full_response.split('[/INST]')[-1].split('</s>')[0].strip()
    # After 4 turns, add medicine suggestions from Meditron
    if conversation_turns >= 4:
        # Collect the full patient conversation plus the Llama-2 summary
        full_patient_info = "\n".join(patient_data) + "\n\nSummary: " + llama_response
        # Get medicine suggestions
        medicine_suggestions = get_meditron_suggestions(full_patient_info)
        # Format the final response
        final_response = (
            f"{llama_response}\n\n"
            f"--- MEDICATION AND HOME CARE SUGGESTIONS ---\n\n"
            f"{medicine_suggestions}"
        )
        return final_response
    return llama_response
# Create the Gradio interface
demo = gr.ChatInterface(
    fn=generate_response,
    title="Medical Assistant with Medicine Suggestions",
    description="Tell me about your symptoms, and after gathering enough information, I'll suggest potential remedies.",
    examples=[
        "I have a cough and my throat hurts",
        "I've been having headaches for a week",
        "My stomach has been hurting since yesterday"
    ],
    theme="soft"
)
if __name__ == "__main__":
    demo.launch()