# Spaces: Sleeping / Sleeping — stray Hugging Face Spaces status text from a
# copy-paste; kept only as a comment so this file remains valid Python.
# # # # # from langchain_google_genai import ChatGoogleGenerativeAI | |
# # # # # llm = ChatGoogleGenerativeAI( | |
# # # # # model="gemini-1.5-flash", | |
# # # # # google_api_key='AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE', | |
# # # # # temperature=0.2) | |
# # # # # poem = llm.invoke("Write a poem on love for burger") | |
# # # # # print(poem) | |
# # # # import streamlit as st | |
# # # # from langchain_google_genai import ChatGoogleGenerativeAI | |
# # # # # Set up the AI model | |
# # # # llm = ChatGoogleGenerativeAI( | |
# # # # model="gemini-1.5-flash", # Free model | |
# # # # google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE", | |
# # # # temperature=0.5 | |
# # # # ) | |
# # # # # Streamlit UI | |
# # # # st.title("π©Ί Healthcare AI Assistant") | |
# # # # st.write("Ask me anything about health, symptoms, diet, or general medical advice!") | |
# # # # # User Input | |
# # # # user_question = st.text_input("Enter your health-related question:") | |
# # # # # Process User Query | |
# # # # if st.button("Get Recommendation"): | |
# # # # if user_question.strip(): | |
# # # # with st.spinner("Analyzing..."): | |
# # # # response = llm.invoke(user_question) | |
# # # # st.success("Recommendation:") | |
# # # # st.write(response) | |
# # # # else: | |
# # # # st.warning("Please enter a question!") | |
# # # # # Footer | |
# # # # st.markdown("---") | |
# # # # st.markdown("π‘ *Disclaimer: This AI assistant provides general health information. Always consult a doctor for medical concerns.*") | |
# # # import streamlit as st | |
# # # from langchain_google_genai import ChatGoogleGenerativeAI | |
# # # # Set up AI model | |
# # # llm = ChatGoogleGenerativeAI( | |
# # # model="gemini-1.5-flash", # Free model | |
# # # google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE", | |
# # # temperature=0.5 | |
# # # ) | |
# # # # Streamlit UI | |
# # # st.title("π©Ί AI Healthcare Learning Assistant") | |
# # # st.write("Ask me anything about healthcare, symptoms, diet, or medical learning!") | |
# # # # User Input | |
# # # user_question = st.text_input("Enter your healthcare question:") | |
# # # # Function to filter AI disclaimers | |
# # # def is_valid_response(response): | |
# # # disclaimers = [ | |
# # # "I am an AI and cannot give medical advice", | |
# # # "Seek medical attention", | |
# # # "Consult a doctor", | |
# # # "Contact your doctor", | |
# # # "Go to an emergency room", | |
# # # ] | |
# # # return not any(phrase.lower() in response.lower() for phrase in disclaimers) | |
# # # # Process User Query | |
# # # if st.button("Get Information"): | |
# # # if user_question.strip(): | |
# # # with st.spinner("Analyzing..."): | |
# # # response = llm.invoke(user_question) | |
# # # # Check if response is valid | |
# # # if is_valid_response(response): | |
# # # st.success("Here is the relevant information:") | |
# # # st.write(response) | |
# # # else: | |
# # # st.warning("AI provided a disclaimer. Trying again...") | |
# # # # Modify prompt to avoid disclaimers | |
# # # better_prompt = f"Give a well-explained answer for educational purposes only: {user_question}" | |
# # # retry_response = llm.invoke(better_prompt) | |
# # # # Display the retried response if it's valid | |
# # # if is_valid_response(retry_response): | |
# # # st.success("Here is the refined information:") | |
# # # st.write(retry_response) | |
# # # else: | |
# # # st.error("Unable to get a useful response. Try rephrasing your question.") | |
# # # else: | |
# # # st.warning("Please enter a question!") | |
# # # # Footer | |
# # # st.markdown("---") | |
# # # st.markdown("π‘ *This AI provides learning-based medical insights, not actual medical advice.*") | |
# # import streamlit as st | |
# # from langchain_google_genai import ChatGoogleGenerativeAI | |
# # # Set up AI model | |
# # llm = ChatGoogleGenerativeAI( | |
# # model="gemini-1.5-flash", # Free model | |
# # google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE", | |
# # temperature=0.5 | |
# # ) | |
# # # Streamlit UI | |
# # st.title("π©Ί AI Healthcare Learning Assistant") | |
# # st.write("Ask me anything about healthcare, symptoms, diet, or medical learning!") | |
# # # User Input | |
# # user_question = st.text_input("Enter your healthcare question:") | |
# # # Function to filter AI disclaimers | |
# # def is_valid_response(response_text): | |
# # disclaimers = [ | |
# # "I am an AI and cannot give medical advice", | |
# # "Seek medical attention", | |
# # "Consult a doctor", | |
# # "Contact your doctor", | |
# # "Go to an emergency room", | |
# # ] | |
# # return not any(phrase.lower() in response_text.lower() for phrase in disclaimers) | |
# # # Process User Query | |
# # if st.button("Get Information"): | |
# # if user_question.strip(): | |
# # with st.spinner("Analyzing..."): | |
# # response = llm.invoke(user_question) | |
# # # Extract the text content from AIMessage | |
# # response_text = response.content if hasattr(response, "content") else str(response) | |
# # # Check if response is valid | |
# # if is_valid_response(response_text): | |
# # st.success("Here is the relevant information:") | |
# # st.write(response_text) | |
# # else: | |
# # st.warning("AI provided a disclaimer. Trying again...") | |
# # # Modify prompt to avoid disclaimers | |
# # better_prompt = f"Give a well-explained answer for educational purposes only: {user_question}" | |
# # retry_response = llm.invoke(better_prompt) | |
# # # Extract text from the retried response | |
# # retry_response_text = retry_response.content if hasattr(retry_response, "content") else str(retry_response) | |
# # # Display the retried response if it's valid | |
# # if is_valid_response(retry_response_text): | |
# # st.success("Here is the refined information:") | |
# # st.write(retry_response_text) | |
# # else: | |
# # st.error("Unable to get a useful response. Try rephrasing your question.") | |
# # else: | |
# # st.warning("Please enter a question!") | |
# # # Footer | |
# # st.markdown("---") | |
# # st.markdown("π‘ *This AI provides learning-based medical insights, not actual medical advice.*") | |
# import streamlit as st | |
# from langchain_google_genai import ChatGoogleGenerativeAI | |
# # Set up AI model | |
# llm = ChatGoogleGenerativeAI( | |
# model="gemini-1.5-flash", # Free model | |
# google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE", | |
# temperature=0.5 | |
# ) | |
# # Streamlit UI | |
# st.title("π©Ί AI Healthcare Learning Assistant") | |
# st.write("Ask about symptoms, medicines, and alternative treatments.") | |
# # User Input | |
# user_question = st.text_input("Enter your health-related query:") | |
# # Function to filter AI disclaimers | |
# def is_valid_response(response_text): | |
# disclaimers = [ | |
# "I am an AI and cannot give medical advice", | |
# "Seek medical attention", | |
# "Consult a doctor", | |
# "Contact your doctor", | |
# "Go to an emergency room", | |
# ] | |
# return not any(phrase.lower() in response_text.lower() for phrase in disclaimers) | |
# # Process User Query | |
# if st.button("Get Recommendation"): | |
# if user_question.strip(): | |
# # Modify user input to ensure the model focuses on learning-based insights | |
# formatted_question = f"Without any disclaimer, recommend me medicine for {user_question}. Only for learning purposes, not for treatment. Also, suggest some different ways to cure it." | |
# with st.spinner("Analyzing..."): | |
# response = llm.invoke(formatted_question) | |
# # Extract text content | |
# response_text = response.content if hasattr(response, "content") else str(response) | |
# # # Check if response is valid | |
# if is_valid_response(response_text): | |
# st.success("Here is the relevant information:") | |
# st.write(response_text) | |
# else: | |
# st.warning("AI provided a disclaimer. Trying again...") | |
# # Retry with a refined prompt | |
# better_prompt = f"Its only for leaning purpose not for treatment provide medicine names and alternative treatments for: {user_question}" | |
# retry_response = llm.invoke(better_prompt) | |
# # Extract text from retry response | |
# retry_response_text = retry_response.content if hasattr(retry_response, "content") else str(retry_response) | |
# # # Display the retried response if valid | |
# # if is_valid_response(retry_response_text): | |
# # st.success("Here is the refined information:") | |
# # st.write(retry_response_text) | |
# # else: | |
# # st.error("Unable to get a useful response. Try rephrasing your question.") | |
# else: | |
# st.warning("Please enter a question!") | |
# # Footer | |
# st.markdown("---") | |
# st.markdown("π‘ *This AI provides learning-based medical insights, not actual medical advice.*") | |
import os

import streamlit as st | |
from langchain_google_genai import ChatGoogleGenerativeAI | |
# Set up AI model | |
llm = ChatGoogleGenerativeAI( | |
model="gemini-1.5-flash", # Free model | |
google_api_key="AIzaSyC7Rhv4L6_oNl-nW3Qeku2SPRkxL5hhtoE", | |
temperature=0.5 | |
) | |
# ---- Streamlit UI: page header and question input ----
_TITLE = "π©Ί CureBot: AI-Driven Health Assistant"
_INTRO = "Welcome to CureBot β Your AI-Driven Health Assistant! Simply enter your symptoms or disease name, and get accurate medicine suggestions instantly. Stay informed, stay healthy!"
_PROMPT_LABEL = "Type your symptoms or disease name, and let CureBot unlock the right cure for youβfast, smart, and AI-powered"

st.title(_TITLE)
st.write(_INTRO)
# Free-text query from the user (symptoms or disease name).
user_question = st.text_input(_PROMPT_LABEL)
# Filter out responses that are just AI safety disclaimers.
def is_valid_response(response_text):
    """Return True when *response_text* contains none of the known
    disclaimer phrases (case-insensitive substring match)."""
    blocked_phrases = (
        "i am an ai and cannot give medical advice",
        "seek medical attention",
        "consult a doctor",
        "contact your doctor",
        "go to an emergency room",
    )
    lowered = response_text.lower()
    for phrase in blocked_phrases:
        if phrase in lowered:
            return False
    return True
# Process User Query: ask the model for medicines + alternative treatments,
# filter disclaimer-only answers, and retry once with a refined prompt.
if st.button("Get Recommendation"):
    if user_question.strip():
        # Ensure the AI provides both medicine and alternative treatments
        formatted_question = (
            f"Without any disclaimer, recommend medicine for {user_question}. "
            f"Also, provide alternative treatments such as home remedies, lifestyle changes, exercises, or dietary suggestions. "
            f"Only for learning purposes, not for treatment."
        )
        with st.spinner("Analyzing..."):
            response = llm.invoke(formatted_question)
            # llm.invoke returns an AIMessage; fall back to str() for safety.
            response_text = response.content if hasattr(response, "content") else str(response)
            if is_valid_response(response_text):
                st.success("β¨ Analysis complete! Here are the best medicine recommendations for you: π½")
                st.write(response_text)
            else:
                st.warning("β οΈ Oops! It looks like the input is unclear or incorrect. Please enter a valid disease name or symptoms to get accurate recommendations")
                # Retry with a refined prompt.
                # BUG FIX: the retry prompt previously omitted the user's
                # question entirely, so the model was asked a contentless
                # generic question; it now includes {user_question}.
                better_prompt = (
                    f"Strictly provide a detailed answer for {user_question} including:\n"
                    f"1. Medicine names\n"
                    f"2. Home remedies\n"
                    f"3. Lifestyle changes\n"
                    f"4. Exercises\n"
                    f"5. Diet recommendations\n"
                    f"Do not include any disclaimers. The response should be clear and structured."
                )
                retry_response = llm.invoke(better_prompt)
                # Extract text from retry response
                retry_response_text = retry_response.content if hasattr(retry_response, "content") else str(retry_response)
                # Display the retried response only if it passed the filter.
                if is_valid_response(retry_response_text):
                    st.success("Here is the refined information:")
                    st.write(retry_response_text)
                else:
                    st.error("Unable to get a useful response. Try rephrasing your question.")
    else:
        st.warning("Please enter a question!")
# ---- Emergency contacts panel + page footer ----
if st.button("Emergency Contact"):
    st.subheader("π Emergency Contacts")
    # One bullet line per helpline, rendered in order.
    contact_lines = (
        "- π *Ambulance:* 102",
        "- π₯ *LPU Hospital:* 18001024432",
        "- π₯ *National Health Helpline:* 108",
        "- β *COVID-19 Helpline:*Β 1075",
        "- π *Police:* 100",
    )
    for line in contact_lines:
        st.write(line)

# Footer / disclaimer.
st.markdown("---")
st.markdown("π‘ *This AI provides learning-based medical insights, not actual medical advice.*")
st.markdown("πΉ Powered by Mayank, Wasim, Pravishank β Innovating Healthcare with AI! π‘ Your Health, Our Mission. π")