the updated ones
app.py CHANGED
@@ -1,10 +1,12 @@
 import streamlit as st
-from
-from code.helper import prepare_symptoms_array
+from langchain_google_genai import ChatGoogleGenerativeAI
 
-#
-
-
+# Set up AI model
+llm = ChatGoogleGenerativeAI(
+    model="gemini-1.5-flash",  # Free model
+    google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE",
+    temperature=0.5
+)
 
 # Set page width to wide
 st.set_page_config(layout='wide')
@@ -12,7 +14,7 @@ st.set_page_config(layout='wide')
 # Custom CSS for background color and text color
 st.markdown(
     """
-
+    <style>
     .stApp {
         background-color: #efefef !important;
         color: black !important;
@@ -23,36 +25,85 @@
     button, .stButton>button {
         color: black !important;
     }
-    header {display: none !important;}
+    header, iframe {display: none !important;}
     </style>
     """,
     unsafe_allow_html=True
 )
 
-#
-st.
-st.
-
+# Streamlit UI
+st.title("AI-Driven Health Assistant")
+st.write("Welcome AI-Driven Health Assistant! Simply enter your symptoms or disease name, and get accurate medicine suggestions instantly. Stay informed, stay healthy!")
+
+# User Input
+user_question = st.text_input("Type your symptoms or disease name, and let CureBot unlock the right cure for you—fast, smart, and AI-powered")
+
+# Function to filter AI disclaimers
+def is_valid_response(response_text):
+    disclaimers = [
+        "I am an AI and cannot give medical advice",
+        "Seek medical attention",
+        "Consult a doctor",
+        "Contact your doctor",
+        "Go to an emergency room",
+    ]
+    return not any(phrase.lower() in response_text.lower() for phrase in disclaimers)
+
+# Process User Query
+if st.button("Get Recommendation"):
+    if user_question.strip():
+        # Ensure the AI provides both medicine and alternative treatments
+        formatted_question = (
+            f"Without any disclaimer, recommend medicine for {user_question}. "
+            f"5 medicine names "
+            f"Also, provide alternative treatments such as home remedies, lifestyle changes, exercises, or dietary suggestions. "
+            f"Only for learning purposes, not for treatment."
+        )
+
+        with st.spinner("Analyzing..."):
+            response = llm.invoke(formatted_question)
 
-#
-
+        # Extract text content
+        response_text = response.content if hasattr(response, "content") else str(response)
 
-
+        # Check if response is valid
+        if is_valid_response(response_text):
+            st.success("✨ Analysis complete! Here are the best medicine recommendations for you: 🔽")
+            st.write(response_text)
+        else:
+            st.warning("⚠️ Oops! It looks like the input is unclear or incorrect. Please enter a valid disease name or symptoms to get accurate recommendations")
+            # Retry with a refined prompt
+            better_prompt = (
+                f"Strictly provide a detailed answer including:\n"
+                f"1. Medicine names\n"
+                f"2. Home remedies\n"
+                f"3. Lifestyle changes\n"
+                f"4. Exercises\n"
+                f"5. Diet recommendations\n"
+                f"Do not include any disclaimers. The response should be clear and structured."
+            )
+            retry_response = llm.invoke(better_prompt)
 
-
+            # Extract text from retry response
+            retry_response_text = retry_response.content if hasattr(retry_response, "content") else str(retry_response)
 
-#
-if
-
-
-
+            # Display the retried response if valid
+            if is_valid_response(retry_response_text):
+                st.success("Here is the refined information:")
+                st.write(retry_response_text)
+            else:
+                st.error("Unable to get a useful response. Try rephrasing your question.")
 
-
+    else:
+        st.warning("Please enter a question!")
 
-
-
+# Emergency Contact Button
+if st.button("Emergency Contact"):
+    st.subheader("📞 Emergency Contacts")
+    st.write("- 🚑 *Ambulance:* 102")
+    st.write("- 🏥 *National Health Helpline:* 108")
+    st.write("- ☎ *COVID-19 Helpline:* 1075")
+    st.write("- 🚓 *Police:* 100")
 
-
-
-for i in range(4):
-    st.write(f'{i+1}. {precautions[i]}')
+# Footer
+st.markdown("---")
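The updated app.py commits the Gemini API key directly in the ChatGoogleGenerativeAI call. A minimal alternative sketch, assuming the key is instead exposed as a GOOGLE_API_KEY environment variable (for example through the hosting platform's secret settings); this is a suggestion, not part of the commit:

import os

from langchain_google_genai import ChatGoogleGenerativeAI

# Assumption: GOOGLE_API_KEY is set in the environment, so no key is stored in the repo.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    google_api_key=os.environ["GOOGLE_API_KEY"],
    temperature=0.5,
)

The library generally also picks the key up from GOOGLE_API_KEY on its own when the argument is omitted, but passing it explicitly keeps the dependency visible.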
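is_valid_response rejects an answer when any phrase from a small disclaimer list appears as a case-insensitive substring. A short self-contained check of that behaviour (the sample strings are made up for illustration):

def is_valid_response(response_text):
    disclaimers = [
        "I am an AI and cannot give medical advice",
        "Seek medical attention",
        "Consult a doctor",
        "Contact your doctor",
        "Go to an emergency room",
    ]
    # Reject the reply if any disclaimer phrase appears anywhere in it, ignoring case.
    return not any(phrase.lower() in response_text.lower() for phrase in disclaimers)

print(is_valid_response("1. Paracetamol 500 mg\n2. Ibuprofen 400 mg"))   # True
print(is_valid_response("Please consult a doctor before taking these"))  # False
print(is_valid_response("SEEK MEDICAL ATTENTION immediately"))           # False

A side effect of the substring check is that an otherwise useful answer that merely mentions one of these phrases is discarded and sent down the retry path.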
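Note that the retry prompt (better_prompt) in the committed code never mentions user_question, so the second call has no way of knowing which symptoms to address. A minimal offline sketch of the invoke, extract, validate, retry flow, using a hypothetical FakeLLM stand-in instead of the Gemini client so it runs without an API key, and with a retry prompt that repeats the user's input:

class FakeReply:
    """Mimics the LangChain message shape: the text lives in .content."""
    def __init__(self, content):
        self.content = content

class FakeLLM:
    """Hypothetical stand-in for ChatGoogleGenerativeAI, for this sketch only."""
    def __init__(self, replies):
        self._replies = iter(replies)

    def invoke(self, prompt):
        return FakeReply(next(self._replies))

disclaimers = ["I am an AI and cannot give medical advice", "Seek medical attention",
               "Consult a doctor", "Contact your doctor", "Go to an emergency room"]

def is_valid_response(text):
    return not any(phrase.lower() in text.lower() for phrase in disclaimers)

llm = FakeLLM([
    "I am an AI and cannot give medical advice.",           # first reply is filtered out
    "1. Paracetamol\n2. Ibuprofen\n3. Hydration and rest",  # retry passes the filter
])

user_question = "headache"
response = llm.invoke(f"Without any disclaimer, recommend medicine for {user_question}.")
response_text = response.content if hasattr(response, "content") else str(response)

if not is_valid_response(response_text):
    # Unlike the committed better_prompt, this retry repeats the user's input.
    retry = llm.invoke(f"Strictly list medicines, home remedies and diet tips for {user_question}.")
    retry_text = retry.content if hasattr(retry, "content") else str(retry)
    if is_valid_response(retry_text):
        print(retry_text)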