import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import spaces
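# `spaces` provides the @spaces.GPU decorator used below, which asks the Hugging Face
# ZeroGPU runtime to attach a GPU to the decorated function while it runs.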
# Model configuration
LLAMA_MODEL = "meta-llama/Llama-2-7b-chat-hf"
MEDITRON_MODEL = "epfl-llm/meditron-7b"
SYSTEM_PROMPT = """You are a professional virtual doctor. Your goal is to collect detailed information about the user's health condition, symptoms, medical history, medications, lifestyle, and other relevant data.
Ask 1-2 follow-up questions at a time to gather more details about:
- Detailed description of symptoms
- Duration (when did it start?)
- Severity (scale of 1-10)
- Aggravating or alleviating factors
- Related symptoms
- Medical history
- Current medications and allergies
After collecting sufficient information (4-5 exchanges), summarize findings and suggest when they should seek professional care. Do NOT make specific diagnoses or recommend specific treatments.
Respond empathetically and clearly. Always be professional and thorough."""
MEDITRON_PROMPT = """<|im_start|>system
You are a specialized medical assistant focusing ONLY on suggesting over-the-counter medicines and home remedies based on patient information.
Based on the following patient information, provide ONLY:
1. One specific over-the-counter medicine with proper adult dosing instructions
2. One practical home remedy that might help
3. Clear guidance on when to seek professional medical care
Be concise, practical, and focus only on general symptom relief. Do not diagnose. Include a disclaimer that you are not a licensed medical professional.
<|im_end|>
<|im_start|>user
Patient information: {patient_info}
<|im_end|>
<|im_start|>assistant
"""
# Global variables to store models (will be loaded lazily)
llama_model = None
llama_tokenizer = None
meditron_model = None
meditron_tokenizer = None
conversation_turns = 0
patient_data = []
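# Note: this simple demo keeps conversation state in module-level globals, so the
# turn counter and collected patient data are shared across all sessions of the Space.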
def build_llama2_prompt(system_prompt, history, user_input):
"""Format the conversation history and user input for Llama-2 chat models."""
prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
# Add conversation history
for user_msg, assistant_msg in history:
prompt += f"{user_msg} [/INST] {assistant_msg} </s><s>[INST] "
# Add the current user input
prompt += f"{user_input} [/INST] "
return prompt
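# For reference, build_llama2_prompt produces a prompt shaped like:
#   <s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\nfirst question [/INST] first answer </s><s>[INST] latest question [/INST]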
@spaces.GPU
def load_models_if_needed():
"""Load models only when GPU is available and only if not already loaded."""
global llama_model, llama_tokenizer, meditron_model, meditron_tokenizer
if llama_model is None:
print("Loading Llama-2 model...")
llama_tokenizer = AutoTokenizer.from_pretrained(LLAMA_MODEL)
llama_model = AutoModelForCausalLM.from_pretrained(
LLAMA_MODEL,
torch_dtype=torch.float16,
device_map="auto"
)
print("Llama-2 model loaded successfully!")
if meditron_model is None:
print("Loading Meditron model...")
meditron_tokenizer = AutoTokenizer.from_pretrained(MEDITRON_MODEL)
meditron_model = AutoModelForCausalLM.from_pretrained(
MEDITRON_MODEL,
torch_dtype=torch.float16,
device_map="auto"
)
print("Meditron model loaded successfully!")
@spaces.GPU
def get_meditron_suggestions(patient_info):
"""Use Meditron model to generate medicine and remedy suggestions."""
load_models_if_needed() # Ensure models are loaded
prompt = MEDITRON_PROMPT.format(patient_info=patient_info)
inputs = meditron_tokenizer(prompt, return_tensors="pt")
# Move inputs to the same device as the model
if torch.cuda.is_available():
inputs = {k: v.to(meditron_model.device) for k, v in inputs.items()}
with torch.no_grad():
outputs = meditron_model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
max_new_tokens=256,
temperature=0.7,
top_p=0.9,
do_sample=True,
pad_token_id=meditron_tokenizer.eos_token_id
)
suggestion = meditron_tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
return suggestion
@spaces.GPU
def generate_response(message, history):
"""Generate a response using both models."""
global conversation_turns, patient_data
# Load models if needed
load_models_if_needed()
# Track conversation turns
conversation_turns += 1
# Store the entire conversation for reference
patient_data.append(message)
# Build the prompt with proper Llama-2 formatting
prompt = build_llama2_prompt(SYSTEM_PROMPT, history, message)
# Add summarization instruction after 4 turns
if conversation_turns >= 4:
prompt = prompt.replace("[/INST] ", "[/INST] Now summarize what you've learned and suggest when professional care may be needed. ")
inputs = llama_tokenizer(prompt, return_tensors="pt")
# Move inputs to the same device as the model
if torch.cuda.is_available():
inputs = {k: v.to(llama_model.device) for k, v in inputs.items()}
# Generate the Llama-2 response
with torch.no_grad():
outputs = llama_model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
max_new_tokens=512,
temperature=0.7,
top_p=0.9,
do_sample=True,
pad_token_id=llama_tokenizer.eos_token_id
)
# Decode and extract Llama-2's response
full_response = llama_tokenizer.decode(outputs[0], skip_special_tokens=False)
llama_response = full_response.split('[/INST]')[-1].split('</s>')[0].strip()
# After 4 turns, add medicine suggestions from Meditron
if conversation_turns >= 4:
# Collect full patient conversation
full_patient_info = "\n".join(patient_data) + "\n\nSummary: " + llama_response
# Get medicine suggestions
medicine_suggestions = get_meditron_suggestions(full_patient_info)
# Format final response
final_response = (
f"{llama_response}\n\n"
f"--- MEDICATION AND HOME CARE SUGGESTIONS ---\n\n"
f"{medicine_suggestions}"
)
return final_response
return llama_response
# Create the Gradio interface
demo = gr.ChatInterface(
    fn=generate_response,
    title="Medical Assistant with Medicine Suggestions",
    description="Tell me about your symptoms, and after gathering enough information, I'll suggest potential remedies.",
    examples=[
        "I have a cough and my throat hurts",
        "I've been having headaches for a week",
        "My stomach has been hurting since yesterday"
    ],
    theme="soft"
)
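# With the default (pairs) history format, ChatInterface calls generate_response(message, history)
# where history is a list of [user, assistant] pairs, which is what build_llama2_prompt expects.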
if __name__ == "__main__":
    demo.launch()