Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,49 +1,57 @@
|
|
1 |
import gradio as gr
|
2 |
-
import
|
|
|
3 |
from tensorflow.keras.models import load_model
|
4 |
from gtts import gTTS
|
5 |
import tempfile
|
|
|
6 |
|
7 |
-
|
|
|
|
|
|
|
|
|
8 |
model = load_model("emotion_model.h5")
|
9 |
emotion_labels = ['Angry','Disgust','Fear','Happy','Sad','Surprise','Neutral']
|
10 |
|
11 |
def detect_emotion(img):
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
def gpt_counsel_response(emotion, user_input=""):
|
17 |
-
prompt = f"You are a friendly emotional counselor chatbot. A user is feeling {emotion.lower()}. "
|
18 |
-
if user_input:
|
19 |
-
prompt += f"They said: '{user_input}'. Respond with empathy and support."
|
20 |
-
else:
|
21 |
-
prompt += "Gently ask how they are feeling and offer motivation or comfort."
|
22 |
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
return response.choices[0].message.content.strip()
|
29 |
|
30 |
-
def
|
31 |
emotion = detect_emotion(image)
|
|
|
32 |
if emotion in ["Sad", "Angry", "Fear"]:
|
33 |
-
|
|
|
|
|
|
|
34 |
else:
|
35 |
reply = f"You look {emotion.lower()}! Keep shining ☀️"
|
36 |
|
37 |
tts = gTTS(text=reply)
|
38 |
audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
|
39 |
tts.save(audio_file.name)
|
40 |
-
return reply, audio_file.name
|
41 |
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
|
|
|
|
|
|
|
|
46 |
outputs=["text", "audio"],
|
47 |
-
title="SmartMirror
|
48 |
-
description="
|
49 |
-
)
|
|
|
|
|
|
import gradio as gr
import numpy as np
import cv2
from tensorflow.keras.models import load_model
from gtts import gTTS
import tempfile
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

# Load chatbot model
# NOTE(review): blenderbot-3B is a multi-billion-parameter checkpoint; it is
# downloaded and held in memory at import time — confirm the hosting hardware
# can take this, and consider the smaller blenderbot-400M-distill if not.
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
chatbot = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B")

# Load emotion model
# detect_emotion() feeds this model 48x48 grayscale input, so it is assumed
# to be an FER-style Keras classifier whose output units are ordered exactly
# as emotion_labels below — TODO confirm against the training code.
model = load_model("emotion_model.h5")
emotion_labels = ['Angry','Disgust','Fear','Happy','Sad','Surprise','Neutral']
|
16 |
|
17 |
def detect_emotion(img):
    """Classify the dominant emotion in an RGB image.

    The frame is converted to a 48x48 grayscale patch, scaled to [0, 1],
    and run through the module-level Keras ``model``; the label with the
    highest predicted score is returned.

    Parameters
    ----------
    img : numpy.ndarray
        RGB image as delivered by the Gradio image component.

    Returns
    -------
    str
        One of the entries in ``emotion_labels``.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    face = cv2.resize(grayscale, (48, 48))
    # Model expects a (batch, height, width, channels) tensor of floats.
    batch = face.reshape(1, 48, 48, 1) / 255.0
    scores = model.predict(batch)
    best_index = int(np.argmax(scores))
    return emotion_labels[best_index]
|
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
+
def get_chatbot_reply(user_message):
    """Generate a conversational reply with the module-level BlenderBot.

    Parameters
    ----------
    user_message : str
        The user's free-text message.

    Returns
    -------
    str
        The decoded model response, special tokens stripped.
    """
    encoded = tokenizer([user_message], return_tensors="pt")
    generated_ids = chatbot.generate(**encoded)
    # decode() the first (and only) sequence in the batch.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
|
|
|
28 |
|
29 |
+
def smart_mirror(image, user_input):
    """Gradio callback: read the user's mood from *image* and reply in
    text and speech.

    Negative emotions (Sad/Angry/Fear) route the user's message to the
    BlenderBot counselor; otherwise a cheerful canned reply is used.

    Parameters
    ----------
    image : numpy.ndarray | None
        RGB frame from the image input; ``None`` when the user submits
        without uploading/capturing an image.
    user_input : str | None
        Optional typed message.

    Returns
    -------
    tuple[str, str | None]
        Display text and the path of the synthesized mp3 (``None`` when
        no speech was generated).
    """
    # Fix: Gradio passes None when no image is supplied; previously this
    # crashed inside detect_emotion's cv2.cvtColor call.
    if image is None:
        return "Please provide an image so I can read your mood.", None

    emotion = detect_emotion(image)

    if emotion in ["Sad", "Angry", "Fear"]:
        # Fix: tolerate a None textbox value as well as an empty one.
        if not (user_input or "").strip():
            reply = f"You seem {emotion.lower()}. Want to talk about it?"
        else:
            reply = get_chatbot_reply(user_input)
    else:
        reply = f"You look {emotion.lower()}! Keep shining ☀️"

    tts = gTTS(text=reply)
    audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    # Fix: close our handle before gTTS writes to the path by name —
    # the open descriptor was leaked and blocks the write on Windows.
    audio_file.close()
    tts.save(audio_file.name)

    return f"Emotion: {emotion}\nResponse: {reply}", audio_file.name
|
45 |
+
|
46 |
+
# Wire the app into a Gradio UI: one image input plus an optional text
# message; outputs are the text summary and the synthesized mp3 audio.
interface = gr.Interface(
    fn=smart_mirror,
    inputs=[
        gr.Image(type="numpy", label="Upload or Capture Image"),
        gr.Textbox(lines=2, placeholder="Say something...", label="Your Message (optional)")
    ],
    outputs=["text", "audio"],
    title="SmartMirror Counseling AI",
    description="An AI Smart Mirror that detects your mood and supports you like a real friend."
)

# Blocks here serving HTTP until the process is stopped.
interface.launch()
|