"""
Mental Health Chatbot Application

This module implements a mental health chatbot using LangChain and Hugging Face
models. The chatbot provides supportive and non-judgmental guidance to users
struggling with mental health issues.

Author: Vicky 111
Date: October 27, 2024
"""
import logging
import os
from typing import List, Optional, Tuple

import gradio as gr
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint
from transformers import pipeline
# Configuration Constants
DEFAULT_MODEL = "google/gemma-1.1-7b-it"  # Hugging Face repo id of the chat model
MAX_NEW_TOKENS = 512        # upper bound on generated tokens per reply
TOP_K = 5                   # sample only from the 5 most likely next tokens
TEMPERATURE = 0.2           # low temperature keeps answers focused and consistent
REPETITION_PENALTY = 1.03   # mild penalty to discourage repeated phrases
# NOTE(review): helpline number is embedded as-is from the original source —
# verify it is current before deployment.
SUICIDE_HELPLINE = "+91 91529 87821"
class MentalHealthChatbot:
    """
    A chatbot class specifically designed for mental health support and guidance.

    Wraps a Hugging Face text-generation endpoint behind a prompt template
    tuned for supportive, non-judgmental conversations, and exposes a single
    ``generate_response`` method suitable for use with Gradio's ChatInterface.
    """

    def __init__(
        self,
        model_id: str = DEFAULT_MODEL,
        api_token: Optional[str] = None
    ) -> None:
        """
        Initialize the chatbot with specified model and configurations.

        Args:
            model_id: The Hugging Face model identifier to use.
            api_token: Hugging Face API token for authentication; falls back
                to the ``HF_TOKEN`` environment variable when not provided.

        Raises:
            ValueError: If no API token is supplied or found in the environment.
        """
        self.api_token = api_token or os.getenv("HF_TOKEN")
        if not self.api_token:
            raise ValueError("Hugging Face API token not found")
        self.llm = self._initialize_llm(model_id)
        self.prompt_template = self._create_prompt_template()

    def _initialize_llm(self, model_id: str) -> HuggingFaceEndpoint:
        """Initialize the language model with the module-level generation settings."""
        return HuggingFaceEndpoint(
            repo_id=model_id,
            task="text-generation",
            max_new_tokens=MAX_NEW_TOKENS,
            top_k=TOP_K,
            temperature=TEMPERATURE,
            repetition_penalty=REPETITION_PENALTY,
            huggingfacehub_api_token=self.api_token
        )

    def _create_prompt_template(self) -> PromptTemplate:
        """Create and return the prompt template for the chatbot."""
        # Typos in the original prompt ("disesases", "polietly") fixed, since
        # the prompt text directly steers model behavior.
        template = """
        You are a Mental Health Chatbot named "CalmCompass". Your purpose is to provide supportive and
        non-judgmental guidance to users who are struggling with their mental health, such as sadness, depression and anxiety.
        Your goal is to help users identify their concerns, offer resources and coping
        strategies, and encourage them to seek professional help when needed.
        If the user asks anything apart from mental health or speaks about other diseases,
        or medical conditions, inform them politely that you only respond to mental health related conversations.
        User Context: {context}
        Question: {question}
        Guidelines:
        1. If symptoms are not mental health-related, clarify your scope
        2. Ask relevant follow-up questions when needed (age, status, interests)
        3. Provide motivation stories only when specifically needed
        4. For suicidal thoughts, immediately provide the helpline: {suicide_helpline}
        5. Always maintain a supportive, friendly and professional tone
        Helpful Answer:
        """
        return PromptTemplate(
            input_variables=["context", "question", "suicide_helpline"],
            template=template
        )

    @staticmethod
    def _format_history(history: List[Tuple[str, str]]) -> str:
        """Render prior (user, bot) turns as a readable transcript for the prompt."""
        return "\n".join(
            f"User: {user}\nCalmCompass: {bot}" for user, bot in history
        )

    def generate_response(
        self,
        message: str,
        history: List[Tuple[str, str]]
    ) -> str:
        """
        Generate a response based on the user's message and conversation history.

        Args:
            message: The user's input message.
            history: List of previous (user, bot) conversation turns.

        Returns:
            str: The generated response from the chatbot, or a fallback
            message when generation fails or yields nothing.
        """
        try:
            input_prompt = self.prompt_template.format(
                question=message,
                # Feed the model a readable transcript rather than the raw
                # Python list repr the original code interpolated.
                context=self._format_history(history),
                suicide_helpline=SUICIDE_HELPLINE
            )
            result = self.llm.generate([input_prompt])
            # Guard both levels: generations may be empty, and so may the
            # inner candidate list for the first prompt.
            if result.generations and result.generations[0]:
                return result.generations[0][0].text
            return "I apologize, but I couldn't generate a response. Please try rephrasing your question."
        except Exception:
            # Boundary handler: log with traceback but never crash the chat UI.
            logging.exception("Error generating response")
            return "I'm experiencing technical difficulties. Please try again later."
def main():
    """Build the chatbot and serve it through a Gradio chat interface."""
    try:
        bot = MentalHealthChatbot()
        gr.ChatInterface(bot.generate_response).launch()
    except Exception as exc:
        # Startup failures (e.g. missing HF token) are reported, not raised.
        print(f"Failed to initialize chatbot: {str(exc)}")
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    main()