Sanchit2207 committed
Commit 17f9ae0 · verified · 1 Parent(s): 2c6dcac

Update app.py

Files changed (1)
  1. app.py +35 -54
app.py CHANGED
@@ -1,57 +1,38 @@
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import torch
  import gradio as gr
 
- # Load DialoGPT-medium
- tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
- model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
-
- # Global state to keep track of chat
- chat_history_ids = None
-
- def respond(user_input, history):
-     global chat_history_ids
-
-     # Tokenize the input with EOS token
-     new_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
-
-     # Append to history if available
-     bot_input_ids = torch.cat([chat_history_ids, new_input_ids], dim=-1) if chat_history_ids is not None else new_input_ids
-
-     # Generate response
-     chat_history_ids = model.generate(
-         bot_input_ids,
-         max_length=1000,
-         pad_token_id=tokenizer.eos_token_id,
-         temperature=0.7,
-         top_k=50,
-         top_p=0.95,
-         do_sample=True
-     )
-
-     # Decode only the new tokens
-     response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
-
-     # Truncate output to avoid HTTP overflow (safe zone ~500 chars)
-     response = response[:500]
-
-     # Append to history
-     history.append((user_input, response))
-     return history, history
-
- # Build Gradio Blocks UI
- with gr.Blocks(title="Teen Mental Health Chatbot") as demo:
-     gr.Markdown("### 💬 Teen Mental Health Chatbot\n_Not a replacement for therapy, but here to talk._")
-
-     chatbot = gr.Chatbot()
-     msg = gr.Textbox(placeholder="Tell me how you're feeling...")
-     state = gr.State([])
-
-     send = gr.Button("Send")
-
-     send.click(fn=respond, inputs=[msg, state], outputs=[chatbot, state])
-     msg.submit(fn=respond, inputs=[msg, state], outputs=[chatbot, state])
-
- # Required to run on Hugging Face
+ # Custom supportive chatbot function
+ def mental_health_bot(message):
+     message = message.lower().strip()
+
+     responses = {
+         "lonely": "You're not alone. I'm here to listen. Would you like to talk about what's making you feel this way?",
+         "heartbroken": "Heartbreak can feel really heavy. Want to share what happened? I'm here for you.",
+         "not able to study": "It’s okay to feel stuck. Maybe take a short break, or we can talk about what’s bothering you?",
+         "lacking in physics": "Struggling is part of learning. Maybe I can help you with a study tip or we can just talk about what’s making it tough.",
+         "anxious": "Anxiety can be overwhelming. Want to talk through it together?",
+         "depressed": "You matter. Even if it doesn’t feel like it right now, things can get better. I'm here for you.",
+         "suicidal": "I'm really sorry you're feeling this way. You're not alone. Please consider reaching out to a mental health professional or a helpline right away. Your life is important."
+     }
+
+     # Check if any keyword is matched
+     for keyword in responses:
+         if keyword in message:
+             return responses[keyword]
+
+     # Default fallback
+     return "I'm here for you. Want to talk more about how you're feeling?"
+
+ # Gradio interface setup
+ chat_interface = gr.Interface(
+     fn=mental_health_bot,
+     inputs=gr.Textbox(lines=2, placeholder="How are you feeling today?"),
+     outputs="text",
+     title="Teen Mental Health Chatbot 💬",
+     description="A kind and simple chatbot to support your feelings. You're not alone. Let's talk.",
+     theme="soft"
+ )
+
+ # Launch the app
  if __name__ == "__main__":
-     demo.launch()
+     chat_interface.launch()
+
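
For a quick local sanity check of the new keyword responder, a minimal sketch along these lines can be run beside the updated file (hypothetical snippet; it assumes app.py is importable as app, and building the gr.Interface at import time does not start the server):

# Hypothetical smoke test for the keyword-based responder in the updated app.py
from app import mental_health_bot

# A message containing a known keyword returns that keyword's canned reply...
print(mental_health_bot("I've been feeling really lonely lately"))
# ...while anything else falls through to the default fallback line.
print(mental_health_bot("Just tired, I guess"))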