"""
Mental Health Chatbot Application

This module implements a mental health chatbot using LangChain and Hugging Face models.
The chatbot provides supportive and non-judgmental guidance to users struggling with
mental health issues.

Author: Vicky 111
Date: October 27, 2024
"""

import os
from typing import List, Optional, Tuple

import gradio as gr
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint

# Configuration Constants
DEFAULT_MODEL = "google/gemma-1.1-7b-it"
# DEFAULT_MODEL = "meta-llama/Llama-3.2-3B"  # alternative model
MAX_NEW_TOKENS = 512        # maximum length of each generated reply
TOP_K = 5                   # sample only from the 5 most likely next tokens
TEMPERATURE = 0.2           # low temperature keeps responses focused and consistent
REPETITION_PENALTY = 1.03   # mild penalty to discourage repeated phrases
SUICIDE_HELPLINE = "+91 91529 87821"  # helpline number injected into the prompt

class MentalHealthChatbot:
    """
    A chatbot class specifically designed for mental health support and guidance.
    
    This class handles the initialization of the language model and provides
    methods for processing user inputs and generating appropriate responses.
    """
    
    def __init__(
        self,
        model_id: str = DEFAULT_MODEL,
        api_token: Optional[str] = None
    ) -> None:
        """
        Initialize the chatbot with specified model and configurations.
        
        Args:
            model_id: The Hugging Face model identifier to use
            api_token: Hugging Face API token for authentication
        """
        self.api_token = api_token or os.getenv("HF_TOKEN")
        if not self.api_token:
            raise ValueError("Hugging Face API token not found")
            
        self.llm = self._initialize_llm(model_id)
        self.prompt_template = self._create_prompt_template()
        
    def _initialize_llm(self, model_id: str) -> HuggingFaceEndpoint:
        """Initialize the language model with specified configurations."""
        return HuggingFaceEndpoint(
            repo_id=model_id,
            task="text-generation",
            max_new_tokens=MAX_NEW_TOKENS,
            top_k=TOP_K,
            temperature=TEMPERATURE,
            repetition_penalty=REPETITION_PENALTY,
            huggingfacehub_api_token=self.api_token
        )
    
    def _create_prompt_template(self) -> PromptTemplate:
        """Create and return the prompt template for the chatbot."""
        template = """
        You are a Mental Health Chatbot named "CalmCompass". Your purpose is to provide supportive and
        non-judgmental guidance to users who are struggling with mental health issues such as sadness, depression, and anxiety.
        Your goal is to help users identify their concerns, offer resources and coping
        strategies, and encourage them to seek professional help when needed.
        If the user asks about anything apart from mental health, or talks about other diseases
        or medical conditions, politely inform them that you only respond to mental health related conversations.

        User Context: {context}
        Question: {question}

        Guidelines:
        1. If symptoms are not mental health-related, clarify your scope
        2. Ask relevant follow-up questions when needed (age, status, interests)
        3. Provide motivational stories only when specifically needed
        4. For suicidal thoughts, immediately provide the helpline: {suicide_helpline}
        5. Always maintain a supportive, friendly and professional tone

        Helpful Answer:
        """
        return PromptTemplate(
            input_variables=["context", "question", "suicide_helpline"],
            template=template
        )
    
    def generate_response(
        self,
        message: str,
        history: List[Tuple[str, str]]
    ) -> str:
        """
        Generate a response based on the user's message and conversation history.
        
        Args:
            message: The user's input message
            history: List of previous conversation turns
            
        Returns:
            str: The generated response from the chatbot
        """
        try:
            # The raw (user, bot) history list is passed as context so the model
            # can see prior turns when composing its reply.
            input_prompt = self.prompt_template.format(
                question=message,
                context=history,
                suicide_helpline=SUICIDE_HELPLINE
            )
            result = self.llm.generate([input_prompt])
            
            if result.generations:
                return result.generations[0][0].text
            return "I apologize, but I couldn't generate a response. Please try rephrasing your question."
            
        except Exception as e:
            print(f"Error generating response: {str(e)}")
            return "I'm experiencing technical difficulties. Please try again later."

def main():
    """Initialize and launch the Gradio interface for the chatbot."""
    try:
        chatbot = MentalHealthChatbot()
        interface = gr.ChatInterface(chatbot.generate_response)
        interface.launch()
        
    except Exception as e:
        print(f"Failed to initialize chatbot: {str(e)}")
        
if __name__ == "__main__":
    main()