import os

# Suppress TensorFlow warnings (these must be set before TensorFlow is imported)
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import gradio as gr
import nltk
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import logging

# --- Groq LLM Integration ---
from groq import Groq

# Load Groq API key from Hugging Face secret
GROQ_API_KEY = os.environ.get("groq")
client = Groq(api_key=GROQ_API_KEY)

def ask_groq_llm(user_message):
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "user", "content": user_message}
        ]
    )
    return response.choices[0].message.content

# Download necessary NLTK resources
nltk.download("punkt")
nltk.download("punkt_tab")  # required by word_tokenize on newer NLTK releases
stemmer = LancasterStemmer()

# Load intents and chatbot training data
with open("intents.json") as file:
    intents_data = json.load(file)
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)

# Build the chatbot model using TensorFlow 2.x Keras
chatbot_model = Sequential([
    Dense(8, input_shape=(len(training[0]),), activation='relu'),
    Dense(8, activation='relu'),
    Dense(len(output[0]), activation='softmax')
])
chatbot_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Load the saved model or train if not present
if os.path.exists("MentalHealthChatBotmodel.h5"):
    chatbot_model = load_model("MentalHealthChatBotmodel.h5")
else:
    chatbot_model.fit(training, output, epochs=1000, batch_size=8, verbose=1)
    chatbot_model.save("MentalHealthChatBotmodel.h5")

# Hugging Face sentiment and emotion models
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

# Initialize emotion pipeline once
emotion_pipeline = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)

# Google Maps API Client
gmaps = googlemaps.Client(key=os.getenv("GOOGLE_API_KEY"))

# Load the disease dataset
df_train = pd.read_csv("Training.csv")
df_test = pd.read_csv("Testing.csv")

# Label encoder for consistent train/test encoding
label_encoder = LabelEncoder()

def prepare_data(df, is_train=True):
    X = df.iloc[:, :-1]
    y = df.iloc[:, -1]
    if is_train:
        y_encoded = label_encoder.fit_transform(y)
    else:
        y_encoded = label_encoder.transform(y)
    return X, y_encoded

X_train, y_train = prepare_data(df_train, is_train=True)
X_test, y_test = prepare_data(df_test, is_train=False)

# Define the models
models = {
    "Decision Tree": DecisionTreeClassifier(),
    "Random Forest": RandomForestClassifier(),
    "Naive Bayes": GaussianNB()
}

# Train and evaluate models
trained_models = {}
for model_name, model_obj in models.items():
    model_obj.fit(X_train, y_train)
    y_pred = model_obj.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    trained_models[model_name] = {'model': model_obj, 'accuracy': acc}
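
# Optional sanity check: record each classical model's held-out accuracy at startup.
# Uses only names defined above and stays silent unless logging is configured to INFO.
for _name, _info in trained_models.items():
    logging.info("Disease model '%s' test accuracy: %.3f", _name, _info['accuracy'])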

# --- Helper Functions for Chatbot ---
def bag_of_words(s, words):
    bag = [0] * len(words)
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)

def generate_chatbot_response(message, history):
    history = history or []
    try:
        result = chatbot_model.predict(np.array([bag_of_words(message, words)]), verbose=0)
        tag = labels[np.argmax(result)]
        response = "I'm sorry, I didn't understand that. 🤔"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        response = f"Error: {e}"
    history.append((message, response))
    return history, response

def analyze_sentiment(user_input):
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    sentiment_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
    return f"Sentiment: {sentiment_map[sentiment_class]}"

def detect_emotion(user_input):
    result = emotion_pipeline(user_input)
    emotion = result[0]["label"].lower().strip()
    emotion_map = {
        "joy": "Joy 😊",
        "anger": "Anger 😠",
        "sadness": "Sadness 😢",
        "fear": "Fear 😨",
        "surprise": "Surprise 😲",
        "neutral": "Neutral 😐",
    }
    return emotion_map.get(emotion, "Unknown 🤔"), emotion

def generate_suggestions(emotion):
    emotion_key = emotion.lower()
    suggestions = {
        "joy": [
            ("Mindfulness Practices", "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation"),
            ("Coping with Anxiety", "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"),
            ("Emotional Wellness Toolkit", "https://www.nih.gov/health-information/emotional-wellness-toolkit"),
            ("Relaxation Video", "https://youtu.be/yGKKz185M5o"),
        ],
        "anger": [
            ("Emotional Wellness Toolkit", "https://www.nih.gov/health-information/emotional-wellness-toolkit"),
            ("Stress Management Tips", "https://www.health.harvard.edu/health-a-to-z"),
            ("Dealing with Anger", "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"),
            ("Relaxation Video", "https://youtu.be/MIc299Flibs"),
        ],
        "fear": [
            ("Mindfulness Practices", "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation"),
            ("Coping with Anxiety", "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"),
            ("Emotional Wellness Toolkit", "https://www.nih.gov/health-information/emotional-wellness-toolkit"),
            ("Relaxation Video", "https://youtu.be/yGKKz185M5o"),
        ],
        "sadness": [
            ("Emotional Wellness Toolkit", "https://www.nih.gov/health-information/emotional-wellness-toolkit"),
            ("Dealing with Anxiety", "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"),
            ("Relaxation Video", "https://youtu.be/-e-4Kx5px_I"),
        ],
        "surprise": [
            ("Managing Stress", "https://www.health.harvard.edu/health-a-to-z"),
            ("Coping Strategies", "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"),
            ("Relaxation Video", "https://youtu.be/m1vaUGtyo-A"),
        ],
    }
    formatted_suggestions = [
        "### Suggestions",
        f"Since you’re feeling {emotion}, you might find these links particularly helpful. Don’t hesitate to explore:",
        "| Title | Link |",
        "|-------|------|"
    ]
    formatted_suggestions += [
        f"| {title} | [{link}]({link}) |"
        for title, link in suggestions.get(emotion_key, [("No specific suggestions available.", "#")])
    ]
    return "\n".join(formatted_suggestions)
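
# Illustration only (links abbreviated): for emotion="sadness" the function above returns
# markdown along these lines, which gr.Markdown renders as a small table of links:
#   ### Suggestions
#   Since you’re feeling sadness, you might find these links particularly helpful. Don’t hesitate to explore:
#   | Title | Link |
#   |-------|------|
#   | Emotional Wellness Toolkit | [https://www.nih.gov/...](https://www.nih.gov/...) |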

def get_health_professionals_and_map(location, query):
    try:
        if not location or not query:
            return [], ""
        geo_location = gmaps.geocode(location)
        if geo_location:
            # Read lat/lng explicitly instead of relying on dict ordering
            coords = geo_location[0]["geometry"]["location"]
            lat, lng = coords["lat"], coords["lng"]
            places_result = gmaps.places_nearby(location=(lat, lng), radius=10000, keyword=query)["results"]
            professionals = []
            map_ = folium.Map(location=(lat, lng), zoom_start=13)
            for place in places_result:
                professionals.append([place['name'], place.get('vicinity', 'No address provided')])
                folium.Marker(
                    location=[place["geometry"]["location"]["lat"], place["geometry"]["location"]["lng"]],
                    popup=f"{place['name']}"
                ).add_to(map_)
            return professionals, map_._repr_html_()
        return [], ""
    except Exception as e:
        logging.error(f"Error fetching health professionals: {e}")
        return [], ""

def app_function_chatbot(user_input, location, query, history):
    chatbot_history, _ = generate_chatbot_response(user_input, history)
    sentiment_result = analyze_sentiment(user_input)
    emotion_result, cleaned_emotion = detect_emotion(user_input)
    suggestions = generate_suggestions(cleaned_emotion)
    professionals, map_html = get_health_professionals_and_map(location, query)
    return chatbot_history, sentiment_result, emotion_result, suggestions, professionals, map_html

def predict_disease(symptoms):
    valid_symptoms = [s for s in symptoms if s is not None]
    if len(valid_symptoms) < 3:
        return "Please select at least 3 symptoms for a better prediction."
    input_test = np.zeros(len(X_train.columns))
    for symptom in valid_symptoms:
        if symptom in X_train.columns:
            input_test[X_train.columns.get_loc(symptom)] = 1
    # Keep the training column names so scikit-learn does not warn about missing feature names
    input_df = pd.DataFrame([input_test], columns=X_train.columns)
    predictions = {}
    for model_name, info in trained_models.items():
        prediction = info['model'].predict(input_df)[0]
        predicted_disease = label_encoder.inverse_transform([prediction])[0]
        predictions[model_name] = predicted_disease
    markdown_output = [
        "### Predicted Diseases",
        "| Model | Predicted Disease |",
        "|-------|------------------|"
    ]
    for model_name, disease in predictions.items():
        markdown_output.append(f"| {model_name} | {disease} |")
    return "\n".join(markdown_output)
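
# Illustrative call only (the symptom names below are placeholders; the real options are
# whatever column names Training.csv provides):
#   predict_disease(["itching", "skin_rash", "fatigue", None, None])
# returns a markdown table with one row per trained classifier and its predicted disease.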

# The original welcome banner used custom HTML/CSS that is not preserved in this copy;
# the markup below is a minimal reconstruction that keeps the same text.
welcome_message = """
<h1 style="text-align: center;">Welcome to the Well-Being Companion!</h1>
"""

with gr.Blocks(theme="shivi/calm_seafoam") as app:
    gr.HTML(welcome_message)

    with gr.Tab("Well-Being Chatbot"):
        # Header markup reconstructed minimally (original styling not preserved).
        gr.HTML("""
        <h1 style="text-align: center;">🌼 Well-Being Companion 🌼</h1>
        <h3 style="text-align: center;">Your Trustworthy Guide to Emotional Wellness and Health</h3>
        <p style="text-align: center;">🌈 Emotional Support | 🧘🏻‍♀️ Mindfulness | 🥗 Nutrition | 🏋️ Physical Health | 💤 Sleep Hygiene</p>
        """)
        # The original page embedded three wellness images here; their URLs are not preserved
        # in this copy, so only the placeholder alt text is kept.
        gr.HTML("""
        <!-- Wellness Image 1 | Wellness Image 2 | Wellness Image 3 -->
        """)

        with gr.Row():
            user_input = gr.Textbox(label="Please Enter Your Message Here", placeholder="Type your message here...", max_lines=3)
            location = gr.Textbox(label="Please Enter Your Current Location", placeholder="E.g., Honolulu", max_lines=1)
            query = gr.Textbox(label="Search Health Professionals Nearby", placeholder="E.g., Health Professionals", max_lines=1)

        with gr.Row():
            submit_chatbot = gr.Button(value="Submit Your Message", variant="primary")
            clear_chatbot = gr.Button(value="Clear", variant="secondary")

        chatbot = gr.Chatbot(label="Chat History", show_label=True)
        sentiment = gr.Textbox(label="Detected Sentiment", show_label=True)
        emotion = gr.Textbox(label="Detected Emotion", show_label=True)
        professionals = gr.DataFrame(
            label="Nearby Health Professionals",
            headers=["Name", "Address"],
            value=[]
        )
        suggestions_markdown = gr.Markdown(label="Suggestions")
        map_html = gr.HTML(label="Interactive Map")

        def clear_input():
            return "", []

        submit_chatbot.click(
            app_function_chatbot,
            inputs=[user_input, location, query, chatbot],
            outputs=[chatbot, sentiment, emotion, suggestions_markdown, professionals, map_html],
        )

        clear_chatbot.click(
            clear_input,
            inputs=None,
            outputs=[user_input, chatbot]
        )

    with gr.Tab("Groq Medical Q&A"):
        groq_input = gr.Textbox(label="Ask a medical question (Groq LLM)")
        groq_output = gr.Textbox(label="Groq LLM Response")
        groq_button = gr.Button("Ask Groq LLM")
        groq_button.click(
            ask_groq_llm,
            inputs=groq_input,
            outputs=groq_output
        )
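    # Optional hardening (a sketch, not wired in): API failures from the Groq call currently
    # surface as unhandled errors in the UI. A wrapper like the one below could return a
    # readable message instead; to use it, point groq_button.click at ask_groq_llm_safe.
    #
    #   def ask_groq_llm_safe(user_message):
    #       try:
    #           return ask_groq_llm(user_message)
    #       except Exception as exc:
    #           return f"Groq request failed: {exc}"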
    with gr.Tab("Disease Prediction"):
        # Header markup reconstructed minimally (original styling not preserved).
        gr.HTML("""
        <h1 style="text-align: center;">Disease Prediction</h1>
        <h3 style="text-align: center;">Help us understand your symptoms!</h3>
        """)

        symptom1 = gr.Dropdown(choices=[None] + list(X_train.columns), label="Select Symptom 1", value=None)
        symptom2 = gr.Dropdown(choices=[None] + list(X_train.columns), label="Select Symptom 2", value=None)
        symptom3 = gr.Dropdown(choices=[None] + list(X_train.columns), label="Select Symptom 3", value=None)
        symptom4 = gr.Dropdown(choices=[None] + list(X_train.columns), label="Select Symptom 4", value=None)
        symptom5 = gr.Dropdown(choices=[None] + list(X_train.columns), label="Select Symptom 5", value=None)

        submit_disease = gr.Button(value="Predict Disease", variant="primary")
        disease_prediction_result = gr.Markdown(label="Predicted Diseases")

        submit_disease.click(
            lambda sym1, sym2, sym3, sym4, sym5: predict_disease([sym1, sym2, sym3, sym4, sym5]),
            inputs=[symptom1, symptom2, symptom3, symptom4, symptom5],
            outputs=disease_prediction_result
        )

if __name__ == "__main__":
    # share=True requests a temporary public Gradio link in addition to the local/Space URL
    app.launch(share=True)