safwansajad committed on
Commit e9a015f · verified · 1 Parent(s): 9799648

Update app.py

Files changed (1)
  1. app.py +17 -40
app.py CHANGED
@@ -1,45 +1,22 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import torch
+ from transformers import pipeline

- # Load the model and tokenizer
- model_name = "givyboy/TinyLlama-1.1B-Chat-v1.0-mental-health-conversational"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+ # Load model from Hugging Face Hub
+ model_name = "thrishala/mental_health_chatbot"
+ nlp = pipeline("text-generation", model=model_name)

- # Function to generate response
- def chat_with_bot(message, history):
-     history = history or []
-     chat_history = ""
-     for user_msg, bot_msg in history:
-         chat_history += f"User: {user_msg}\nBot: {bot_msg}\n"
-     chat_history += f"User: {message}\nBot:"
+ # Function to process user input and return chatbot's response
+ def chatbot_response(user_input):
+     # Get response using the model pipeline
+     response = nlp(user_input, max_length=150, num_return_sequences=1)
+     return response[0]['generated_text']

-     inputs = tokenizer(chat_history, return_tensors="pt", return_attention_mask=False).to(model.device)
-     outputs = model.generate(
-         **inputs,
-         max_new_tokens=150,
-         do_sample=True,
-         temperature=0.7,
-         top_p=0.9,
-         pad_token_id=tokenizer.eos_token_id
-     )
+ # Gradio interface
+ iface = gr.Interface(fn=chatbot_response,
+                      inputs="text",
+                      outputs="text",
+                      title="Mental Health Chatbot",
+                      description="This chatbot provides empathetic responses to mental health-related queries. It aims to support users in a safe and compassionate manner.")

-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     bot_reply = response.split("Bot:")[-1].strip()
-
-     history.append((message, bot_reply))
-     return bot_reply, history
-
- # UI
- chat_interface = gr.ChatInterface(
-     fn=chat_with_bot,
-     title="🧠 SerenityAI Mental Health Chatbot",
-     description="Welcome! I'm here to help you talk through your thoughts and feelings. 💙",
-     theme="soft",
-     examples=["I'm feeling anxious lately.", "I had a great day!", "I feel really overwhelmed."],
-     retry_btn="🔁 Try Again",
-     clear_btn="🧹 Clear Chat",
- )
-
- chat_interface.launch()
+ # Launch the app
+ iface.launch()
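
Note that a text-generation pipeline returns the prompt concatenated with the continuation in generated_text by default, so the interface above will echo the user's message at the start of each reply, and max_length=150 counts prompt tokens toward the limit. Below is a minimal sketch of the same handler with return_full_text=False and max_new_tokens instead, assuming the same thrishala/mental_health_chatbot checkpoint; it is an illustration, not the committed code.

import gradio as gr
from transformers import pipeline

model_name = "thrishala/mental_health_chatbot"  # same checkpoint as the commit
nlp = pipeline("text-generation", model=model_name)

def chatbot_response(user_input):
    # return_full_text=False drops the echoed prompt from generated_text;
    # max_new_tokens bounds the reply length independently of the prompt length
    outputs = nlp(
        user_input,
        max_new_tokens=150,
        do_sample=True,
        return_full_text=False,
        num_return_sequences=1,
    )
    return outputs[0]["generated_text"].strip()

iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="Mental Health Chatbot",
    description="This chatbot provides empathetic responses to mental health-related queries.",
)

iface.launch()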