Upload 2 files
- app.py +322 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,322 @@
+
+
+# # # # # from langchain_google_genai import ChatGoogleGenerativeAI
+
+
+
+# # # # # llm = ChatGoogleGenerativeAI(
+# # # # #     model="gemini-1.5-flash",
+# # # # #     google_api_key='AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE',
+# # # # #     temperature=0.2)
+
+# # # # # poem = llm.invoke("Write a poem on love for burger")
+# # # # # print(poem)
+
+
+# # # # import streamlit as st
+# # # # from langchain_google_genai import ChatGoogleGenerativeAI
+
+# # # # # Set up the AI model
+# # # # llm = ChatGoogleGenerativeAI(
+# # # #     model="gemini-1.5-flash",  # Free model
+# # # #     google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE",
+# # # #     temperature=0.5
+# # # # )
+
+# # # # # Streamlit UI
+# # # # st.title("🩺 Healthcare AI Assistant")
+# # # # st.write("Ask me anything about health, symptoms, diet, or general medical advice!")
+
+# # # # # User Input
+# # # # user_question = st.text_input("Enter your health-related question:")
+
+# # # # # Process User Query
+# # # # if st.button("Get Recommendation"):
+# # # #     if user_question.strip():
+# # # #         with st.spinner("Analyzing..."):
+# # # #             response = llm.invoke(user_question)
+# # # #         st.success("Recommendation:")
+# # # #         st.write(response)
+# # # #     else:
+# # # #         st.warning("Please enter a question!")
+
+# # # # # Footer
+# # # # st.markdown("---")
+# # # # st.markdown("💡 *Disclaimer: This AI assistant provides general health information. Always consult a doctor for medical concerns.*")
+
+
+
+# # # import streamlit as st
+# # # from langchain_google_genai import ChatGoogleGenerativeAI
+
+# # # # Set up AI model
+# # # llm = ChatGoogleGenerativeAI(
+# # #     model="gemini-1.5-flash",  # Free model
+# # #     google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE",
+# # #     temperature=0.5
+# # # )
+
+# # # # Streamlit UI
+# # # st.title("🩺 AI Healthcare Learning Assistant")
+# # # st.write("Ask me anything about healthcare, symptoms, diet, or medical learning!")
+
+# # # # User Input
+# # # user_question = st.text_input("Enter your healthcare question:")
+
+# # # # Function to filter AI disclaimers
+# # # def is_valid_response(response):
+# # #     disclaimers = [
+# # #         "I am an AI and cannot give medical advice",
+# # #         "Seek medical attention",
+# # #         "Consult a doctor",
+# # #         "Contact your doctor",
+# # #         "Go to an emergency room",
+# # #     ]
+# # #     return not any(phrase.lower() in response.lower() for phrase in disclaimers)
+
+# # # # Process User Query
+# # # if st.button("Get Information"):
+# # #     if user_question.strip():
+# # #         with st.spinner("Analyzing..."):
+# # #             response = llm.invoke(user_question)
+
+# # #         # Check if response is valid
+# # #         if is_valid_response(response):
+# # #             st.success("Here is the relevant information:")
+# # #             st.write(response)
+# # #         else:
+# # #             st.warning("AI provided a disclaimer. Trying again...")
+# # #             # Modify prompt to avoid disclaimers
+# # #             better_prompt = f"Give a well-explained answer for educational purposes only: {user_question}"
+# # #             retry_response = llm.invoke(better_prompt)
+
+# # #             # Display the retried response if it's valid
+# # #             if is_valid_response(retry_response):
+# # #                 st.success("Here is the refined information:")
+# # #                 st.write(retry_response)
+# # #             else:
+# # #                 st.error("Unable to get a useful response. Try rephrasing your question.")
+
+# # #     else:
+# # #         st.warning("Please enter a question!")
+
+# # # # Footer
+# # # st.markdown("---")
+# # # st.markdown("💡 *This AI provides learning-based medical insights, not actual medical advice.*")
+
+
+
+# # import streamlit as st
+# # from langchain_google_genai import ChatGoogleGenerativeAI
+
+# # # Set up AI model
+# # llm = ChatGoogleGenerativeAI(
+# #     model="gemini-1.5-flash",  # Free model
+# #     google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE",
+# #     temperature=0.5
+# # )
+
+# # # Streamlit UI
+# # st.title("🩺 AI Healthcare Learning Assistant")
+# # st.write("Ask me anything about healthcare, symptoms, diet, or medical learning!")
+
+# # # User Input
+# # user_question = st.text_input("Enter your healthcare question:")
+
+# # # Function to filter AI disclaimers
+# # def is_valid_response(response_text):
+# #     disclaimers = [
+# #         "I am an AI and cannot give medical advice",
+# #         "Seek medical attention",
+# #         "Consult a doctor",
+# #         "Contact your doctor",
+# #         "Go to an emergency room",
+# #     ]
+# #     return not any(phrase.lower() in response_text.lower() for phrase in disclaimers)
+
+# # # Process User Query
+# # if st.button("Get Information"):
+# #     if user_question.strip():
+# #         with st.spinner("Analyzing..."):
+# #             response = llm.invoke(user_question)
+
+# #         # Extract the text content from AIMessage
+# #         response_text = response.content if hasattr(response, "content") else str(response)
+
+# #         # Check if response is valid
+# #         if is_valid_response(response_text):
+# #             st.success("Here is the relevant information:")
+# #             st.write(response_text)
+# #         else:
+# #             st.warning("AI provided a disclaimer. Trying again...")
+# #             # Modify prompt to avoid disclaimers
+# #             better_prompt = f"Give a well-explained answer for educational purposes only: {user_question}"
+# #             retry_response = llm.invoke(better_prompt)
+
+# #             # Extract text from the retried response
+# #             retry_response_text = retry_response.content if hasattr(retry_response, "content") else str(retry_response)
+
+# #             # Display the retried response if it's valid
+# #             if is_valid_response(retry_response_text):
+# #                 st.success("Here is the refined information:")
+# #                 st.write(retry_response_text)
+# #             else:
+# #                 st.error("Unable to get a useful response. Try rephrasing your question.")
+
+# #     else:
+# #         st.warning("Please enter a question!")
+
+# # # Footer
+# # st.markdown("---")
+# # st.markdown("💡 *This AI provides learning-based medical insights, not actual medical advice.*")
+
+
+
+
+# import streamlit as st
+# from langchain_google_genai import ChatGoogleGenerativeAI
+
+# # Set up AI model
+# llm = ChatGoogleGenerativeAI(
+#     model="gemini-1.5-flash",  # Free model
+#     google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE",
+#     temperature=0.5
+# )
+
+# # Streamlit UI
+# st.title("🩺 AI Healthcare Learning Assistant")
+# st.write("Ask about symptoms, medicines, and alternative treatments.")
+
+# # User Input
+# user_question = st.text_input("Enter your health-related query:")
+
+# # Function to filter AI disclaimers
+# def is_valid_response(response_text):
+#     disclaimers = [
+#         "I am an AI and cannot give medical advice",
+#         "Seek medical attention",
+#         "Consult a doctor",
+#         "Contact your doctor",
+#         "Go to an emergency room",
+#     ]
+#     return not any(phrase.lower() in response_text.lower() for phrase in disclaimers)
+
+# # Process User Query
+# if st.button("Get Recommendation"):
+#     if user_question.strip():
+#         # Modify user input to ensure the model focuses on learning-based insights
+#         formatted_question = f"Without any disclaimer, recommend me medicine for {user_question}. Only for learning purposes, not for treatment. Also, suggest some different ways to cure it."
+
+#         with st.spinner("Analyzing..."):
+#             response = llm.invoke(formatted_question)
+
+#         # Extract text content
+#         response_text = response.content if hasattr(response, "content") else str(response)
+
+#         # # Check if response is valid
+#         if is_valid_response(response_text):
+#             st.success("Here is the relevant information:")
+#             st.write(response_text)
+#         else:
+#             st.warning("AI provided a disclaimer. Trying again...")
+#             # Retry with a refined prompt
+#             better_prompt = f"It's only for learning purposes, not for treatment; provide medicine names and alternative treatments for: {user_question}"
+#             retry_response = llm.invoke(better_prompt)
+
+#             # Extract text from retry response
+#             retry_response_text = retry_response.content if hasattr(retry_response, "content") else str(retry_response)
+
+#             # # Display the retried response if valid
+#             # if is_valid_response(retry_response_text):
+#             #     st.success("Here is the refined information:")
+#             #     st.write(retry_response_text)
+#             # else:
+#             #     st.error("Unable to get a useful response. Try rephrasing your question.")
+
+#     else:
+#         st.warning("Please enter a question!")
+
+# # Footer
+# st.markdown("---")
+# st.markdown("💡 *This AI provides learning-based medical insights, not actual medical advice.*")
+
+
+import streamlit as st
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+# Set up AI model
+llm = ChatGoogleGenerativeAI(
+    model="gemini-1.5-flash",  # Free model
+    google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE",
+    temperature=0.5
+)
+
+# Streamlit UI
+st.title("🩺 AI Healthcare Learning Assistant")
+st.write("Get medicine recommendations and alternative treatments for learning purposes.")
+
+# User Input
+user_question = st.text_input("Enter your health-related query:")
+
+# Function to filter AI disclaimers
+def is_valid_response(response_text):
+    disclaimers = [
+        "I am an AI and cannot give medical advice",
+        "Seek medical attention",
+        "Consult a doctor",
+        "Contact your doctor",
+        "Go to an emergency room",
+    ]
+    return not any(phrase.lower() in response_text.lower() for phrase in disclaimers)
+
+# Process User Query
+if st.button("Get Recommendation"):
+    if user_question.strip():
+        # Ensure the AI provides both medicine and alternative treatments
+        formatted_question = (
+            f"Without any disclaimer, recommend medicine for {user_question}. "
+            f"Also, provide alternative treatments such as home remedies, lifestyle changes, exercises, or dietary suggestions. "
+            f"Only for learning purposes, not for treatment."
+        )
+
+        with st.spinner("Analyzing..."):
+            response = llm.invoke(formatted_question)
+
+        # Extract text content
+        response_text = response.content if hasattr(response, "content") else str(response)
+
+        # Check if response is valid
+        if is_valid_response(response_text):
+            st.success("Here is the relevant information:")
+            st.write(response_text)
+        else:
+            st.warning("AI provided a disclaimer. Trying again...")
+            # Retry with a refined prompt that still includes the user's question
+            better_prompt = (
+                f"Strictly provide a detailed answer for: {user_question}, including:\n"
+                f"1. Medicine names\n"
+                f"2. Home remedies\n"
+                f"3. Lifestyle changes\n"
+                f"4. Exercises\n"
+                f"5. Diet recommendations\n"
+                f"Do not include any disclaimers. The response should be clear and structured."
+            )
+            retry_response = llm.invoke(better_prompt)
+
+            # Extract text from retry response
+            retry_response_text = retry_response.content if hasattr(retry_response, "content") else str(retry_response)
+
+            # Display the retried response if valid
+            if is_valid_response(retry_response_text):
+                st.success("Here is the refined information:")
+                st.write(retry_response_text)
+            else:
+                st.error("Unable to get a useful response. Try rephrasing your question.")
+
+    else:
+        st.warning("Please enter a question!")
+
+# Footer
+st.markdown("---")
+st.markdown("💡 *This AI provides learning-based medical insights, not actual medical advice.*")
+
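
Note on the configuration above: app.py embeds the Gemini API key as a string literal. Below is a minimal sketch of the same ChatGoogleGenerativeAI setup reading the key from the environment instead; the GOOGLE_API_KEY variable name is an assumption, not something this commit defines.

import os
from langchain_google_genai import ChatGoogleGenerativeAI

# Assumed environment variable; set it before launching the app,
# e.g. export GOOGLE_API_KEY="..."
api_key = os.environ["GOOGLE_API_KEY"]

llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    google_api_key=api_key,
    temperature=0.5,
)

Loading the key this way keeps it out of the committed source while leaving the rest of the app unchanged.
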
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+langchain
+google-generativeai
+langchain_experimental
+langchain-community
+langchain-google-genai
+streamlit
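
Of the six packages listed, only streamlit and langchain-google-genai are imported directly by app.py in this commit; langchain, google-generativeai, langchain_experimental, and langchain-community appear unused here. To try the app locally, the usual Streamlit workflow applies: install the dependencies with pip install -r requirements.txt, then start the UI with streamlit run app.py.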