import streamlit as st
import speech_recognition as sr
from deep_translator import GoogleTranslator
from langchain_google_genai import ChatGoogleGenerativeAI
import matplotlib.pyplot as plt
import numpy as np
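# Assumed runtime dependencies (PyPI names): streamlit, SpeechRecognition,
# deep-translator, langchain-google-genai, matplotlib, numpy. The
# "Speak Symptoms" button additionally needs PyAudio and a local microphone,
# so it will not work on a headless/hosted deployment.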
# Set up AI model
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    google_api_key="YOUR_GOOGLE_API_KEY",
    temperature=0.5
)
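# A minimal sketch of supplying the key without hardcoding it. Assumption:
# the deployment exposes a GOOGLE_API_KEY environment variable (for example,
# a Streamlit/Spaces secret mapped into the environment); if it is absent,
# the placeholder above is left untouched.
import os

if os.getenv("GOOGLE_API_KEY"):
    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-flash",
        google_api_key=os.environ["GOOGLE_API_KEY"],
        temperature=0.5
    )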
# Custom CSS
st.markdown("""
<style>
.big-font { font-size:20px !important; }
.stButton>button { background-color: #ff4b4b; color: white; font-size: 18px; }
.stTextInput>div>div>input { font-size: 16px; }
</style>
""", unsafe_allow_html=True)
# UI Setup
st.image("healthcare_logo.png", width=150)
st.title("🩺 CureBot: AI-Driven Health Assistant")
st.write("Empowering healthcare with AI-driven insights and recommendations!")
# Sidebar Navigation
st.sidebar.title("🔍 Navigation")
option = st.sidebar.radio("Select an option:", ["Home", "Symptom Checker", "Doctor Connect", "Health Stats"])
translator = GoogleTranslator(source='auto', target='en')
if option == "Home":
    user_question = st.text_input("Type your symptoms or disease name:")

    if st.button("🎤 Speak Symptoms"):
        recognizer = sr.Recognizer()
        # sr.Microphone requires PyAudio and a local microphone.
        with sr.Microphone() as source:
            st.info("Listening...")
            try:
                audio = recognizer.listen(source)
                user_question = recognizer.recognize_google(audio)
                # Keep the transcript across Streamlit reruns so a later
                # "Get Recommendation" click can still use it.
                st.session_state["spoken_symptoms"] = user_question
                st.success(f"Recognized: {user_question}")
            except sr.UnknownValueError:
                st.error("Could not understand audio")
            except sr.RequestError:
                st.error("Error in speech recognition service")

    # Fall back to the stored transcript when the text box is empty.
    if not user_question.strip() and "spoken_symptoms" in st.session_state:
        user_question = st.session_state["spoken_symptoms"]

    lang = st.selectbox("Select Language", ["English", "Hindi", "Spanish"])
    if lang != "English" and user_question.strip():
        # deep_translator's translate() takes only the text; the source and
        # target languages (auto -> en) were fixed when the translator was created.
        user_question = translator.translate(user_question)

    if st.button("Get Recommendation"):
        if user_question.strip():
            formatted_question = (
                f"Provide medicine and alternative treatments for {user_question}. "
                f"List medicines, home remedies, lifestyle changes, exercises, and diet suggestions."
            )
            with st.spinner("Analyzing..."):
                response = llm.invoke(formatted_question)
                response_text = response.content if hasattr(response, "content") else str(response)
                st.success("✨ Analysis complete! Here are your recommendations:")
                st.markdown(response_text)
        else:
            st.warning("Please enter a symptom or disease name!")
elif option == "Symptom Checker":
st.subheader("πŸ”Ž AI Symptom Checker")
st.write("Find possible diseases based on symptoms.")
symptoms = st.text_area("Enter symptoms separated by commas:")
if st.button("Check Symptoms"):
symptom_query = f"Analyze these symptoms: {symptoms}. List possible diseases."
response = llm.invoke(symptom_query)
st.write(response.content if hasattr(response, "content") else str(response))
elif option == "Doctor Connect":
st.subheader("πŸ₯ Find a Doctor Near You")
st.write("Using Google Maps API to find the nearest hospitals and doctors.")
st.write("(Feature Under Development)")
elif option == "Health Stats":
st.subheader("πŸ“Š Health Trends & Data")
diseases = ['Diabetes', 'Hypertension', 'Heart Disease', 'Asthma', 'Obesity']
cases = np.random.randint(5000, 20000, size=len(diseases))
fig, ax = plt.subplots()
ax.barh(diseases, cases, color=['blue', 'green', 'red', 'purple', 'orange'])
ax.set_xlabel("Number of Cases")
ax.set_title("Disease Prevalence Statistics")
st.pyplot(fig)
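    # Sketch for replacing the random demo counts with real figures, assuming a
    # hypothetical disease_stats.csv (columns: disease, cases) bundled with the app:
    #
    #   import pandas as pd
    #   stats = pd.read_csv("disease_stats.csv")
    #   ax.barh(stats["disease"], stats["cases"])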
# Emergency Contact Section
st.sidebar.markdown("---")
st.sidebar.subheader("📞 Emergency Contacts")
st.sidebar.write("- 🚑 *Ambulance:* 102")
st.sidebar.write("- 🏥 *LPU Hospital:* 18001024432")
st.sidebar.write("- 🏥 *National Health Helpline:* 108")
st.sidebar.write("- ☎ *COVID-19 Helpline:* 1075")
st.sidebar.write("- 🚓 *Police:* 100")
st.markdown("---")
st.markdown("🔹 Powered by Mayank, Wasim, Pravishank – Innovating Healthcare with AI! 💡 Your Health, Our Mission. 🚀")