safwansajad committed on
Commit 81a2ae6 · verified · 1 Parent(s): ebcd95f

Update app.py

Files changed (1)
  1. app.py +15 -73
app.py CHANGED
@@ -1,4 +1,4 @@
-import streamlit as st
+import gradio as gr
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 
 # Load chatbot model
@@ -9,78 +9,20 @@ model = AutoModelForCausalLM.from_pretrained(chatbot_model)
 # Load emotion detection model
 emotion_pipeline = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion")
 
-st.title("🧠 Mental Health Chatbot")
-
-# Chat history
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = []
-
-# User Input
-user_input = st.text_input("You:", key="user_input")
-
-if st.button("Send"):
-    if user_input:
-        # Generate chatbot response
-        input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
-        output = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
-        response = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
-
-        # Detect emotion
-        emotion_result = emotion_pipeline(user_input)
-        emotion = emotion_result[0]["label"]
-
-        # Store chat history
-        st.session_state.chat_history.append(("You", user_input))
-        st.session_state.chat_history.append(("Bot", response))
-
-        # Display chat
-        for sender, msg in st.session_state.chat_history:
-            st.write(f"**{sender}:** {msg}")
-
-        # Display emotion
-        st.write(f"🧠 **Emotion Detected:** {emotion}")
-
-
-
-# import streamlit as st
-# from transformers import pipeline, AutoTokenizer
-
-# # ✅ Load Emotion Recognition Model
-# emotion_pipeline = pipeline("text-classification", model="ahmettasdemir/distilbert-base-uncased-finetuned-emotion")
-
-# # ✅ Load Stress Detection Model
-# stress_pipeline = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")
-
-# # ✅ Load Mental Disorder Detection Model
-# mental_bert_pipeline = pipeline("text-classification", model="nlpconnect/vit-gpt2-image-captioning")
-
-# # ✅ Load PHQ-9 Depression Severity Classifier
-# phq9_pipeline = pipeline("text-classification", model="PHQ-9 Depression Classifier")
-
-# # ✅ Load Chatbot Model (DeepSeek)
-# deepseek_model = "deepseek-ai/deepseek-llm-7b"
-# deepseek_tokenizer = AutoTokenizer.from_pretrained(deepseek_model)
-# deepseek_pipeline = pipeline("text-generation", model=deepseek_model, tokenizer=deepseek_tokenizer)
-
-# # 🏥 Streamlit UI
-# st.title("🧠 Mental Health Assistant Bot")
-
-# user_input = st.text_input("How are you feeling today?", "")
-
-# if st.button("Submit"):
-#     if user_input:
-#         # ✅ Emotion Analysis
-#         emotion_result = emotion_pipeline(user_input)[0]
-#         st.write(f"**Emotion Detected:** {emotion_result['label']} ({emotion_result['score']:.2f})")
-
-#         # ✅ Stress Level Analysis
-#         stress_result = stress_pipeline(user_input)[0]
-#         st.write(f"**Stress Level:** {stress_result['label']} ({stress_result['score']:.2f})")
-
-#         # ✅ Mental Health Condition Detection
-#         mental_health_result = mental_bert_pipeline(user_input)[0]
-#         st.write(f"**Possible Mental Health Condition:** {mental_health_result['label']} ({mental_health_result['score']:.2f})")
-
-#         # ✅ AI Chatbot Response
-#         deepseek_response = deepseek_pipeline(user_input, max_length=100, do_sample=True)[0]['generated_text']
-#         st.write(f"🤖 **Chatbot:** {deepseek_response}")
+# Function to generate chatbot response and emotion analysis
+def generate_response(user_input):
+    # Generate chatbot response
+    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
+    output = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
+    response = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
+
+    # Detect emotion
+    emotion_result = emotion_pipeline(user_input)
+    emotion = emotion_result[0]["label"]
+
+    # Return chatbot response and detected emotion
+    return response, f"Emotion Detected: {emotion}"
+
+# Gradio interface setup
+iface = gr.Interface(
+    fn
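
The `gr.Interface(...)` call ends mid-argument at `fn` in the excerpt above. A minimal sketch of how the wiring plausibly continues, assuming one textbox input and two text outputs matching the two values returned by `generate_response`; the component labels and the `launch()` call are illustrative, not taken from the commit:

# Hypothetical completion of the Gradio setup (not part of the diff above)
iface = gr.Interface(
    fn=generate_response,            # function defined in the new app.py
    inputs=gr.Textbox(label="You"),  # free-text user message
    outputs=[
        gr.Textbox(label="Bot"),      # chatbot reply
        gr.Textbox(label="Emotion"),  # "Emotion Detected: ..." string
    ],
    title="🧠 Mental Health Chatbot",
)

iface.launch()  # on Spaces, launching the interface directly is typical

Because `generate_response` returns two strings, the interface needs two output components in the same order, which is what the list of two textboxes provides.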