Update app.py
app.py CHANGED
@@ -1,20 +1,57 @@
-
 import streamlit as st
 from transformers import pipeline
 import google.generativeai as genai
 import json
 import random

+# Language dictionary for interface translations
+LANGUAGES = {
+    'English': {
+        'title': 'Emotionally Aware Chatbot',
+        'input_placeholder': 'Enter your message:',
+        'emotions_header': 'Detected Emotions:',
+        'response_header': 'AI Response:'
+    },
+    'Arabic': {
+        'title': 'روبوت الدردشة العاطفي',
+        'input_placeholder': 'أدخل رسالتك:',
+        'emotions_header': 'العواطف المكتشفة:',
+        'response_header': 'استجابة الذكاء الاصطناعي:'
+    },
+    'French': {
+        'title': 'Chatbot Émotionnel',
+        'input_placeholder': 'Entrez votre message:',
+        'emotions_header': 'Émotions Détectées:',
+        'response_header': 'Réponse IA:'
+    },
+    'Spanish': {
+        'title': 'Chatbot Emocional',
+        'input_placeholder': 'Introduzca su mensaje:',
+        'emotions_header': 'Emociones Detectadas:',
+        'response_header': 'Respuesta de IA:'
+    },
+    'Dutch': {
+        'title': 'Emotioneel Chatbot',
+        'input_placeholder': 'Voer uw bericht in:',
+        'emotions_header': 'Gedetecteerde Emoties:',
+        'response_header': 'AI-Antwoord:'
+    },
+    'Turkish': {
+        'title': 'Duygusal Sohbet Robotu',
+        'input_placeholder': 'Mesajınızı girin:',
+        'emotions_header': 'Algılanan Duygular:',
+        'response_header': 'Yapay Zeka Yanıtı:'
+    }
+}
+
 # Load the JSON data
 with open('emotion_templates.json', 'r') as f:
     data = json.load(f)

 # Configure Gemini (replace with your API key)
 genai.configure(api_key="AIzaSyCYRYNwCU1f9cgJYn8pd86Xcf6hiSMwJr0")
-
 model = genai.GenerativeModel('gemini-2.0-flash')

-
 def generate_text(prompt, context=""):
     """
     Generates text using the Gemini model.
@@ -26,75 +63,82 @@ def generate_text(prompt, context=""):
         print(f"Error generating text: {e}")
         return "I am sorry, I encountered an error while generating the text."

-def create_prompt(emotion, topic
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    prompt = prefix_prompt + prompt + subfix_prompt
-    return prompt
-
+def create_prompt(emotion, topic=None):
+    """
+    Chooses a random prompt from the template list.
+    """
+    templates = data["emotion_templates"][emotion]
+    prompt = random.choice(templates)
+    if topic:
+        # Replace various placeholders in the prompt
+        placeholders = ["[topic/person]", "[topic]", "[person]", "[object]", "[outcome]"]
+        for placeholder in placeholders:
+            prompt = prompt.replace(placeholder, topic)
+
+    subfix_prompt = "Make the generated text in the same language as the topic.\n"
+    subfix_prompt += "Make the generated text short.\n"
+
+    prefix_prompt = "## topic language and content\n" + topic
+    prompt = prefix_prompt + prompt + subfix_prompt
+    return prompt

-
 # 1. Emotion Detection Model (Using Hugging Face's transformer)
-
-emotion_classifier = pipeline("text-classification", model="AnasAlokla/multilingual_go_emotions") # Or choose another model
+emotion_classifier = pipeline("text-classification", model="AnasAlokla/multilingual_go_emotions")

 # 2. Conversational Agent Logic
 def get_ai_response(user_input, emotion_predictions):
     """Generates AI response based on user input and detected emotions."""
-
-
     dominant_emotion = None
     max_score = 0
     responses = None
     for prediction in emotion_predictions:
-
-
-
-
-
-        prompt_text = create_prompt(dominant_emotion,user_input)
-
+        if prediction['score'] > max_score:
+            max_score = prediction['score']
+            dominant_emotion = prediction['label']
+
+    prompt_text = create_prompt(dominant_emotion, user_input)
     responses = generate_text(prompt_text)

     # Handle cases where no specific emotion is clear
     if dominant_emotion is None:
-
+        return "Error for response"
     else:
         return responses

 # 3. Streamlit Frontend
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def main():
+    # Language Selection
+    selected_language = st.sidebar.selectbox(
+        "Select Interface Language",
+        list(LANGUAGES.keys()),
+        index=0  # Default to English
+    )
+
+    # Set page title and header based on selected language
+    st.title(LANGUAGES[selected_language]['title'])
+
+    # Input Text Box
+    user_input = st.text_input(
+        LANGUAGES[selected_language]['input_placeholder'],
+        ""
+    )
+
+    if user_input:
+        # Emotion Detection
+        emotion_predictions = emotion_classifier(user_input)
+
+        # Display Emotions
+        st.subheader(LANGUAGES[selected_language]['emotions_header'])
+        for prediction in emotion_predictions:
+            st.write(f"- {prediction['label']}: {prediction['score']:.2f}")
+
+        # Get AI Response
+        ai_response = get_ai_response(user_input, emotion_predictions)
+
+        # Display AI Response
+        st.subheader(LANGUAGES[selected_language]['response_header'])
+        st.write(ai_response)
+
+# Run the main function
+if __name__ == "__main__":
+    main()
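
The change assumes an emotion_templates.json file (not part of this diff) whose top-level "emotion_templates" object is keyed by the labels the go_emotions classifier emits, each key mapping to a list of template strings with placeholders such as [topic], [person], or [outcome] that create_prompt() replaces with the user's message. A minimal sketch of that assumed structure, using illustrative labels and wording rather than the Space's actual data:

import json

# Hypothetical sketch of emotion_templates.json. The keys must match labels
# produced by AnasAlokla/multilingual_go_emotions, and each template may use
# the placeholders create_prompt() recognises:
# [topic/person], [topic], [person], [object], [outcome].
example_templates = {
    "emotion_templates": {
        "joy": [
            "Write a short, upbeat reply celebrating [topic].",
            "Share the user's excitement about [topic/person]."
        ],
        "sadness": [
            "Write a brief, comforting reply about [topic].",
            "Acknowledge how difficult [outcome] must feel and offer support."
        ],
        "neutral": [
            "Write a short, friendly reply that keeps the conversation about [topic] going."
        ]
    }
}

with open("emotion_templates.json", "w", encoding="utf-8") as f:
    json.dump(example_templates, f, ensure_ascii=False, indent=2)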
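
One behavioural detail worth noting: by default, the transformers text-classification pipeline returns only the single highest-scoring label per input, so the "Detected Emotions" loop and get_ai_response() will usually see a one-element list. If the intent is to rank several emotions per message, the pipeline can be asked for every label's score via top_k; a small sketch of that variant, with a normalisation step because the return shape for a single string can differ across transformers versions:

from transformers import pipeline

# Ask the classifier for all labels with their scores instead of only the top one.
emotion_classifier = pipeline(
    "text-classification",
    model="AnasAlokla/multilingual_go_emotions",
    top_k=None,
)

predictions = emotion_classifier("I can't believe how well that went!")
# For a single string the result may come back flat or nested one level deep;
# normalise to a flat list of {'label', 'score'} dicts.
if predictions and isinstance(predictions[0], list):
    predictions = predictions[0]

# Show the five strongest emotions.
for prediction in sorted(predictions, key=lambda p: p["score"], reverse=True)[:5]:
    print(f"{prediction['label']}: {prediction['score']:.2f}")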