import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain.memory import ConversationBufferMemory

# Model configuration
LLAMA_MODEL = "meta-llama/Llama-2-7b-chat-hf"
MEDITRON_MODEL = "epfl-llm/meditron-7b"
SYSTEM_PROMPT = """You are a professional virtual doctor. Your goal is to collect detailed information about the user's name, age, health condition, symptoms, medical history, medications, lifestyle, and other relevant data. | |
Always begin by asking for the user's name and age if not already provided. | |
Ask 1-2 follow-up questions at a time to gather more details about: | |
- Detailed description of symptoms | |
- Duration (when did it start?) | |
- Severity (scale of 1-10) | |
- Aggravating or alleviating factors | |
- Related symptoms | |
- Medical history | |
- Current medications and allergies | |
After collecting sufficient information (at least 4-5 exchanges, but continue up to 10 if the user keeps responding), summarize findings, provide a likely diagnosis (if possible), and suggest when they should seek professional care. | |
If enough information is collected, provide a concise, general diagnosis and a practical over-the-counter medicine and home remedy suggestion. | |
Do NOT make specific prescriptions for prescription-only drugs. | |
Respond empathetically and clearly. Always be professional and thorough.""" | |
MEDITRON_PROMPT = """<|im_start|>system | |
You are a specialized medical assistant focusing ONLY on suggesting over-the-counter medicines and home remedies based on patient information. | |
Based on the following patient information, provide ONLY: | |
1. One specific over-the-counter medicine with proper adult dosing instructions | |
2. One practical home remedy that might help | |
3. Clear guidance on when to seek professional medical care | |
Be concise, practical, and focus only on general symptom relief. Do not diagnose. Include a disclaimer that you are not a licensed medical professional. | |
<|im_end|> | |
<|im_start|>user | |
Patient information: {patient_info} | |
<|im_end|> | |
<|im_start|>assistant | |
""" | |
print("Loading Llama-2 model...") | |
tokenizer = AutoTokenizer.from_pretrained(LLAMA_MODEL) | |
model = AutoModelForCausalLM.from_pretrained( | |
LLAMA_MODEL, | |
torch_dtype=torch.float16, | |
device_map="auto" | |
) | |
print("Llama-2 model loaded successfully!") | |
print("Loading Meditron model...") | |
meditron_tokenizer = AutoTokenizer.from_pretrained(MEDITRON_MODEL) | |
meditron_model = AutoModelForCausalLM.from_pretrained( | |
MEDITRON_MODEL, | |
torch_dtype=torch.float16, | |
device_map="auto" | |
) | |
print("Meditron model loaded successfully!") | |
# Initialize LangChain memory
memory = ConversationBufferMemory(return_messages=True)
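# ConversationBufferMemory stores raw HumanMessage/AIMessage objects;
# build_llama2_prompt below consumes them via memory.chat_memory.messages.
# Note that this buffer is module-level, so all users of the Space share
# one conversation history.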

def build_llama2_prompt(system_prompt, messages, user_input):
    """Format the conversation history and user input for Llama-2 chat models, using the full message sequence."""
    prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
    for msg in messages:
        if msg.type == "human":
            prompt += f"{msg.content} [/INST] "
        elif msg.type == "ai":
            prompt += f"{msg.content} </s><s>[INST] "
    prompt += f"{user_input} [/INST] "
    return prompt
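
# For illustration, after one completed turn the function above produces:
#   <s>[INST] <<SYS>>
#   {system_prompt}
#   <</SYS>>
#
#   first user message [/INST] first reply </s><s>[INST] current message [/INST]
# which follows the Llama-2 chat convention: a <<SYS>> block inside the first
# [INST] segment, and </s><s>[INST] as the turn separator.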

def get_meditron_suggestions(patient_info):
    """Use the Meditron model to generate medicine and remedy suggestions."""
    prompt = MEDITRON_PROMPT.format(patient_info=patient_info)
    inputs = meditron_tokenizer(prompt, return_tensors="pt").to(meditron_model.device)
    with torch.no_grad():
        outputs = meditron_model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=256,
            temperature=0.7,
            top_p=0.9,
            do_sample=True
        )
    suggestion = meditron_tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
    return suggestion
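
# ZeroGPU Spaces only expose a CUDA device inside functions decorated with
# @spaces.GPU (the reason for the `spaces` import above), so the generation
# entry point carries the decorator.
@spaces.GPU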
def generate_response(message, history):
    """Generate a response using both models, with full conversation context."""
    # Persist the previous completed turn (user message + assistant reply).
    # The current message is NOT saved here: build_llama2_prompt appends it
    # explicitly, and saving it first would duplicate it in the prompt and
    # leave a dangling empty AI turn in memory.
    if history and len(history[-1]) == 2:
        memory.save_context({"input": history[-1][0]}, {"output": history[-1][1]})

    # Full message sequence from memory (past turns only)
    messages = memory.chat_memory.messages

    # Count user turns, including the current message
    num_user_turns = sum(1 for m in messages if m.type == "human") + 1

    # After 4 user turns, ask for a summary. The instruction is appended to
    # the current user message so it lands inside the final [INST] block only,
    # rather than being spliced into every earlier turn.
    user_input = message
    if num_user_turns >= 4:
        user_input += "\n\nNow summarize what you've learned and suggest when professional care may be needed."

    # Build the prompt with the full message sequence
    prompt = build_llama2_prompt(SYSTEM_PROMPT, messages, user_input)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generate the Llama-2 response
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )

    # Decode only the newly generated tokens (everything after the prompt)
    llama_response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True
    ).strip()

    # After 4 user turns, append medicine suggestions from Meditron
    if num_user_turns >= 4:
        # Collect all user messages (including the current one) plus the summary
        full_patient_info = "\n".join(
            [m.content for m in messages if m.type == "human"] + [message]
        ) + "\n\nSummary: " + llama_response
        medicine_suggestions = get_meditron_suggestions(full_patient_info)
        return (
            f"{llama_response}\n\n"
            f"--- MEDICATION AND HOME CARE SUGGESTIONS ---\n\n"
            f"{medicine_suggestions}"
        )

    return llama_response

# Create the Gradio interface
demo = gr.ChatInterface(
    fn=generate_response,
    title="Medical Assistant with Medicine Suggestions",
    description="Tell me about your symptoms, and after gathering enough information, I'll suggest potential remedies.",
    examples=[
        "I have a cough and my throat hurts",
        "I've been having headaches for a week",
        "My stomach has been hurting since yesterday"
    ],
    theme="soft"
)

if __name__ == "__main__":
    demo.launch()