aimyrajpoot committed
Commit b786c2d · verified · 1 Parent(s): 43512f7

Update app.py

Files changed (1)
  1. app.py +42 -58
app.py CHANGED
@@ -1,61 +1,45 @@
  import gradio as gr
- import numpy as np
- import cv2
- from tensorflow.keras.models import load_model
- from gtts import gTTS
- import tempfile
  from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

- # Load chatbot model
- from transformers import pipeline
-
- chatbot = pipeline("conversational", model="facebook/blenderbot_small-90M")
-
-
- # Load emotion model
- model = load_model("emotion_model.h5")
- emotion_labels = ['Angry','Disgust','Fear','Happy','Sad','Surprise','Neutral']
-
- def detect_emotion(img):
-     gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-     resized = cv2.resize(gray, (48, 48)).reshape(1, 48, 48, 1) / 255.0
-     prediction = model.predict(resized)
-     return emotion_labels[np.argmax(prediction)]
-
- def get_chatbot_reply(user_message):
-     from transformers import Conversation
-
- def get_chatbot_reply(user_message):
-     conversation = Conversation(user_message)
-     result = chatbot(conversation)
-     return result.generated_responses[-1]
-
- def smart_mirror(image, user_input):
-     emotion = detect_emotion(image)
-
-     if emotion in ["Sad", "Angry", "Fear"]:
-         if not user_input.strip():
-             reply = f"You seem {emotion.lower()}. Want to talk about it?"
-         else:
-             reply = get_chatbot_reply(user_input)
-     else:
-         reply = f"You look {emotion.lower()}! Keep shining ☀️"
-
-     tts = gTTS(text=reply)
-     audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
-     tts.save(audio_file.name)
-
-     return f"Emotion: {emotion}\nResponse: {reply}", audio_file.name
-
- interface = gr.Interface(
-     fn=smart_mirror,
-     inputs=[
-         gr.Image(type="numpy", label="Upload or Capture Image"),
-         gr.Textbox(lines=2, placeholder="Say something...", label="Your Message (optional)")
-     ],
-     outputs=["text", "audio"],
-     title="SmartMirror Counseling AI",
-     description="An AI Smart Mirror that detects your mood and supports you like a real friend."
- )
-
- interface.launch()
+ # Load a lightweight model to fit in Spaces memory
+ model_name = "facebook/blenderbot_small-90M"
+ tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
+ model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
+
+ # Conversation history
+ chat_history = ""
+
+ def chatbot_response(user_message):
+     global chat_history
+     counseling_prefix = (
+         "You are a friendly counselor and caring friend. "
+         "When the user is sad, comfort them with empathy and motivational quotes or jokes. "
+         "When the user is happy, encourage and celebrate with them.\n"
+     )
+
+     # Append to conversation
+     full_input = counseling_prefix + chat_history + f"User: {user_message}\nAI:"
+     inputs = tokenizer([full_input], return_tensors="pt")
+     reply_ids = model.generate(**inputs, max_length=200, pad_token_id=tokenizer.eos_token_id)
+     reply = tokenizer.decode(reply_ids[0], skip_special_tokens=True)
+
+     # Save conversation
+     chat_history += f"User: {user_message}\nAI: {reply}\n"
+     return reply
+
+ # Create Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("<h1 style='text-align:center;'>🤖 Counseling Chatbot</h1><p style='text-align:center;'>Your caring AI friend</p>")
+     chatbot_ui = gr.Chatbot()
+     user_input = gr.Textbox(placeholder="Type your message here...", label="Your message")
+
+     def respond(message, history):
+         bot_reply = chatbot_response(message)
+         history.append((message, bot_reply))
+         return history, ""
+
+     user_input.submit(respond, [user_input, chatbot_ui], [chatbot_ui, user_input])
+
+ # Launch app
+ if __name__ == "__main__":
+     demo.launch()
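
For reviewers who want to exercise the new chat path outside the Gradio UI, here is a minimal, hypothetical smoke test of the same prompt-concatenation pattern used in app.py. It is a sketch, not part of the commit: it assumes transformers and torch are installed in the Space, swaps in AutoTokenizer and BlenderbotSmallForConditionalGeneration so the facebook/blenderbot_small-90M checkpoint resolves its own classes, and the name sample_turn is purely illustrative.

```python
# Hypothetical smoke test for the chat path introduced in this commit (not part of app.py).
# Assumes transformers + torch are installed. AutoTokenizer picks the tokenizer class
# registered for the facebook/blenderbot_small-90M checkpoint.
from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration

model_name = "facebook/blenderbot_small-90M"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_name)

chat_history = ""  # mirrors the module-level history string in app.py


def chatbot_response(user_message: str) -> str:
    """Same pattern as app.py: counseling prefix + running history + new user turn."""
    global chat_history
    counseling_prefix = (
        "You are a friendly counselor and caring friend. "
        "When the user is sad, comfort them with empathy and motivational quotes or jokes. "
        "When the user is happy, encourage and celebrate with them.\n"
    )
    full_input = counseling_prefix + chat_history + f"User: {user_message}\nAI:"
    # truncation=True guards against exceeding the encoder's max length as history grows
    inputs = tokenizer([full_input], return_tensors="pt", truncation=True)
    reply_ids = model.generate(**inputs, max_length=200)
    reply = tokenizer.decode(reply_ids[0], skip_special_tokens=True)
    chat_history += f"User: {user_message}\nAI: {reply}\n"
    return reply


if __name__ == "__main__":
    # Illustrative turns only; replies depend entirely on the model weights.
    for sample_turn in ["I had a rough day at work.", "Thanks, that helps."]:
        print(f"User: {sample_turn}")
        print(f"AI:   {chatbot_response(sample_turn)}")
```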