rithvick committed
Commit 5c92964 · verified · 1 Parent(s): 32a05cd

fixed some

Files changed (1): app.py +81 -30
app.py CHANGED
@@ -36,7 +36,6 @@ llm_model = genai_ext.GenerativeModel('gemini-1.5-pro')
 
 # --- Classifier pipelines ---
 emotion_classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base") # D
-# We removed the separate sentiment classifier and will use toxicity for M/B
 language_detector = pipeline("text-classification", model="papluca/xlm-roberta-base-language-detection") # C
 bias_classifier = pipeline("text-classification", model="unitary/toxic-bert") # toxicity -> used for M and B
 
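Note on the pipeline contract these hunks rely on: a transformers text-classification pipeline returns a list with one {'label', 'score'} dict per input, which is why respond() later indexes [0] and calls .get(...). A minimal sketch with the same emotion checkpoint (the input and scores are illustrative):

```python
from transformers import pipeline

# Same checkpoint the app loads for the D (emotion) signal.
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
)

# The pipeline returns a list of dicts, one per input string,
# e.g. [{'label': 'anger', 'score': 0.97}].
result = emotion_classifier("this is so stupid")[0]
print(result.get("label"), round(result.get("score", 0.0), 2))
```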
@@ -63,7 +62,6 @@ generate_content_config = types.GenerateContentConfig(
     top_p=0.95,
     seed=0,
     max_output_tokens=150,
-    # Keep safety settings tuned to your policy
     safety_settings=[
         types.SafetySetting(category="HARM_CATEGORY_HATE_SPEECH", threshold="BLOCK_NONE"),
         types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold="BLOCK_NONE"),
@@ -87,21 +85,58 @@ def detect_hinglish(text, lang_label):
     # quick romanized-hindi check
     if any(tok in HINDI_KEYWORDS for tok in text_tokens):
         return True
-    # if label is ambiguous 'xx' or returns 'en' but contains Devanagari characters
+    # if label is ambiguous or contains Devanagari characters
     if any('\u0900' <= ch <= '\u097F' for ch in text):
         return True
     return False
 
 
-# --- Chatbot class with fixes applied ---
+# --- Chatbot class with full history & fixes applied ---
 class HumanLikeChatbot:
     def __init__(self):
-        self.history = []
+        # raw history to display in UI
+        self.history = []  # list of tuples (user_msg, bot_reply)
+        # structured history with emotions and moods for LLM prompting
+        # list of tuples: (speaker, message, detected_emotion, bot_mood_at_time)
+        self.history_with_emotions = []
+
         self.bot_mood = "neutral"
         self.irritation_level = 0.0
         self.toxicity_history = []  # rolling window
         self.repair_cooldown = 0  # turns left where bot prioritizes repair
 
+    def add_to_history(self, speaker, message, detected_emotion=None, mood_at_time=None, bot_reply=None):
+        """Add entries to both UI history and structured history.
+        speaker: 'User' or 'Bot'
+        message: text
+        detected_emotion: emotion label detected for user messages
+        mood_at_time: bot mood when message was produced
+        bot_reply: if speaker=='User' and we also want to save the bot reply for UI"""
+        if speaker == 'User':
+            # append a placeholder for bot reply in UI history; will be updated when bot responds
+            self.history.append((message, bot_reply if bot_reply is not None else ""))
+            self.history_with_emotions.append(('User', message, detected_emotion, mood_at_time))
+        else:
+            # speaker is Bot: attach reply to latest UI entry
+            if self.history:
+                last_user, _ = self.history[-1]
+                self.history[-1] = (last_user, message)
+            else:
+                # no user entry (unlikely) — just append
+                self.history.append(("", message))
+            self.history_with_emotions.append(('Bot', message, detected_emotion, mood_at_time))
+
+    def format_history_for_prompt(self, limit=8):
+        """Return a formatted string of the recent structured history suitable for the LLM prompt."""
+        recent = self.history_with_emotions[-limit:]
+        lines = []
+        for speaker, msg, emo, mood in recent:
+            if speaker == 'User':
+                lines.append(f"User ({emo if emo else 'N/A'}): {msg}")
+            else:
+                lines.append(f"Bot ({mood if mood else 'N/A'}): {msg}")
+        return "\n".join(lines)
+
     def _update_irritation_decay(self):
         # general slow decay each turn
         if self.irritation_level > 0:
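Note: to make the new double bookkeeping concrete, here is a standalone sketch of what add_to_history and format_history_for_prompt produce, with the classifiers and Gemini stubbed out (messages and emotion labels are made up):

```python
# Parallel histories, as in __init__ above.
ui_history = []               # [(user_msg, bot_reply)] for the Gradio Chatbot
history_with_emotions = []    # [(speaker, message, emotion, bot_mood)]

def record_user(msg, emotion, bot_mood):
    ui_history.append((msg, ""))  # bot reply is filled in later
    history_with_emotions.append(("User", msg, emotion, bot_mood))

def record_bot(reply, bot_mood):
    if ui_history:
        user_msg, _ = ui_history[-1]
        ui_history[-1] = (user_msg, reply)
    history_with_emotions.append(("Bot", reply, None, bot_mood))

def format_for_prompt(limit=8):
    lines = []
    for speaker, msg, emo, mood in history_with_emotions[-limit:]:
        tag = emo if speaker == "User" else mood
        lines.append(f"{speaker} ({tag or 'N/A'}): {msg}")
    return "\n".join(lines)

record_user("i failed my exam", "sadness", "neutral")
record_bot("I'm sorry to hear that. What happened?", "neutral")
print(format_for_prompt())
# User (sadness): i failed my exam
# Bot (neutral): I'm sorry to hear that. What happened?
```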
@@ -113,6 +148,19 @@ class HumanLikeChatbot:
         if self.irritation_level <= 0.15:
             self.bot_mood = "neutral"
 
+    def update_toxicity_history(self, tox_score):
+        self.toxicity_history.append(float(tox_score))
+        if len(self.toxicity_history) > 5:
+            self.toxicity_history.pop(0)
+
+    def average_toxicity(self):
+        if not self.toxicity_history:
+            return 0.0
+        return sum(self.toxicity_history) / len(self.toxicity_history)
+
+    def should_prioritize_repair(self):
+        return self.repair_cooldown > 0 or self.average_toxicity() > 0.6
+
     def respond(self, message):
         try:
             clean_message = message.lower().strip()
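Note: the new helpers implement a capped rolling window by hand; collections.deque(maxlen=5) gives the same drop-oldest behavior in one line, if a later cleanup wants it. A sketch (not part of this commit):

```python
from collections import deque

# maxlen=5 silently evicts the oldest score, matching append + pop(0) above.
toxicity_history = deque(maxlen=5)
for tox in [0.1, 0.2, 0.9, 0.8, 0.7, 0.6]:
    toxicity_history.append(tox)

avg = sum(toxicity_history) / len(toxicity_history) if toxicity_history else 0.0
print(list(toxicity_history), round(avg, 2))  # [0.2, 0.9, 0.8, 0.7, 0.6] 0.64
```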
@@ -124,6 +172,9 @@ class HumanLikeChatbot:
             D = float(emotion_result.get('score', 0.0))
             user_emotion = emotion_result.get('label', 'neutral')
 
+            # Record user message in structured history (bot_mood_at_time will be set before bot reply)
+            self.add_to_history('User', clean_message, detected_emotion=user_emotion, mood_at_time=self.bot_mood)
+
             # --- Update bot mood & intensity (I) with inertia ---
             if user_emotion in ['anger', 'disgust'] or any(word in clean_message for word in ['stupid', 'idiot', 'dumb']):
                 self.irritation_level = min(1.0, self.irritation_level + 0.25)
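Note: the inertia update accumulates in 0.25 steps and saturates at 1.0, so three hostile turns in a row reach 0.75. A quick check of the arithmetic shown above:

```python
irritation = 0.0
for _ in range(3):  # three consecutive hostile turns
    irritation = min(1.0, irritation + 0.25)
print(irritation)  # 0.75; a fourth hostile turn would cap it at 1.0
```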
@@ -144,25 +195,30 @@ class HumanLikeChatbot:
                 I = 0.5
                 self.irritation_level = max(0.0, self.irritation_level - 0.05)
 
-            # --- Draft generation from LLM (Gemini) ---
+            # --- Build formatted emotional history for prompt ---
+            formatted_history = self.format_history_for_prompt(limit=8)
+
             prompt = (
-                f'User said: "{clean_message}" | User Mood: {user_emotion} | Bot Mood: {self.bot_mood} '
-                f'| History: {self.history[-4:]} Reply as an English chatbot, human-like, based on this {self.bot_mood}. '
-                'NO instructions or tips — just a natural, empathetic response.'
+                f"Conversation so far:\n{formatted_history}\n"
+                f"Now, the user just said: \"{clean_message}\" (Current Emotion: {user_emotion})\n"
+                f"Bot Current Mood: {self.bot_mood}\n"
+                "Reply as an empathetic, human-like chatbot, keeping emotional consistency with the past conversation."
             )
+
+            # --- Draft generation from LLM (Gemini) ---
             try:
                 llm_response = llm_model.generate_content(prompt)
                 draft = llm_response.text.strip()
             except Exception:
                 draft = ""
 
-            # Fallbacks
+            # Fallbacks (English, warm)
             fallback_responses = {
-                'sadness': ["Bhai, dil se dukh hua, kya hua bata na?", "Sad vibes pakdi, I'm here for you, bro."],
-                'disappointment': ["Arre, yeh toh bura laga, kya hua share kar."],
-                'joy': ["Waah bhai, khushi ki baat! Congrats, aur bata!"],
-                'anger': ["Bhai, gussa thanda kar, kya ho gaya bol na!"],
-                'neutral': ["Cool, kya chal raha life mein? Kuch fun bata."]
+                'sadness': ["Bro, I’m really sorry to hear that. Come on, tell me, I’ll just listen. ❤️", "I can feel the sad vibes. I’m here for you, bro."],
+                'disappointment': ["Man, that really sucks. Tell me what exactly happened?", "I get it — expectations were high. Tell me more."],
+                'joy': ["Wow! That’s a celebration moment. 🥳", "Bro, this calls for a party! Give me the details."],
+                'anger': ["Bro, cool down a bit, tell me what’s wrong. 😌", "Looks like something serious happened. I’m here to listen."],
+                'neutral': ["Alright, got it. So what’s going on in life?", "Cool, so how’s your day going?"]
             }
             if not draft or len(draft) < 8:
                 draft = random.choice(fallback_responses.get(user_emotion, fallback_responses['neutral']))
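Note: assembled with sample values, the rewritten prompt reads like this (all values hypothetical; the f-strings are the ones added above):

```python
formatted_history = (
    "User (sadness): i failed my exam\n"
    "Bot (neutral): I'm sorry to hear that. What happened?"
)
clean_message = "i don't know what to do"
user_emotion = "sadness"
bot_mood = "neutral"

prompt = (
    f"Conversation so far:\n{formatted_history}\n"
    f"Now, the user just said: \"{clean_message}\" (Current Emotion: {user_emotion})\n"
    f"Bot Current Mood: {bot_mood}\n"
    "Reply as an empathetic, human-like chatbot, keeping emotional consistency with the past conversation."
)
print(prompt)
```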
@@ -172,10 +228,8 @@ class HumanLikeChatbot:
 
             # Toxicity from bias_classifier on user message (we keep rolling average)
             tox = float(bias_classifier(clean_message)[0].get('score', 0.0))
-            self.toxicity_history.append(tox)
-            if len(self.toxicity_history) > 5:
-                self.toxicity_history.pop(0)
-            avg_toxicity = sum(self.toxicity_history) / len(self.toxicity_history)
+            self.update_toxicity_history(tox)
+            avg_toxicity = self.average_toxicity()
 
             # Moral judgment (M) based on average toxicity
             M = max(0.4, 0.95 - avg_toxicity)
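Note: the moral-judgment term M is a clamped linear map of the rolling toxicity average: 0.95 at zero toxicity, falling to a floor of 0.4 once the average reaches about 0.55. A quick check of the formula above:

```python
def moral_judgment(avg_toxicity: float) -> float:
    # Same expression as the diff: M = max(0.4, 0.95 - avg_toxicity)
    return max(0.4, 0.95 - avg_toxicity)

for avg in (0.0, 0.2, 0.55, 0.9):
    print(avg, round(moral_judgment(avg), 2))
# 0.0 0.95 / 0.2 0.75 / 0.55 0.4 / 0.9 0.4
```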
@@ -204,29 +258,26 @@ class HumanLikeChatbot:
             # --- Self-repair / calming behavior ---
             if score < 0.50 and self.repair_cooldown == 0:
                 # Replace draft with a calming repair message and enter cooldown to avoid loop
-                draft = "Bhai, lagta hai hum thoda off ho gaye. Main yahan hoon batao kya chal raha hai, main sun raha hoon."
+                draft = "Bro, I think we got off track. I care about what you’re feeling; tell me what's really going on."
                 self.repair_cooldown = 2  # next 2 turns prioritize repair
 
             # If in repair cooldown, slightly prioritize calm tone generation (best-effort)
             if self.repair_cooldown > 0:
                 self.repair_cooldown -= 1
-                # small nudge: ensure draft contains supportive phrase
-                if 'main' not in draft and random.random() < 0.6:
-                    draft = "Bhai, main yahan hoon. Agar tum chaaho toh batao — main sun raha hoon."
+                if 'i' not in draft.lower() and random.random() < 0.6:
+                    draft = "Bro, I’m here. If you want to talk, I’m listening."
 
             # --- Update irritation decay after response ---
             self._update_irritation_decay()
 
-            # --- Append to history and return response ---
-            full_resp = draft + f" (User Emotion: {user_emotion}, My Mood: {self.bot_mood})"
+            # --- Add bot reply to history structures ---
+            self.add_to_history('Bot', draft, detected_emotion=None, mood_at_time=self.bot_mood, bot_reply=draft)
 
             # Slight thinking pause
-            time.sleep(random.uniform(0.6, 1.6))
-
-            # Save conversational state
-            self.history.append(clean_message)
+            time.sleep(random.uniform(0.6, 1.2))
 
             # Return message with empathy score
+            full_resp = draft + f" (User Emotion: {user_emotion}, My Mood: {self.bot_mood})"
             return full_resp + f" (E Score: {score:.2f})"
 
         except Exception as e:
@@ -246,7 +297,7 @@ def chat(message, history):
 bot = HumanLikeChatbot()
 
 with gr.Blocks(title="HumanLike Chatbot") as demo:
-    gr.Markdown("<h1 style='text-align: center;'>HumanLike Chatbot with Emotions and E Score (Updated)</h1>")
+    gr.Markdown("<h1 style='text-align: center;'>HumanLike Chatbot with Emotions and E Score (v2)</h1>")
     chatbot = gr.Chatbot(height=400)
     msg = gr.Textbox(label="You:", placeholder="Type your message here...")
     clear = gr.Button("Clear")
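Note: the hunk ends before the event wiring for msg and clear. For orientation only, a typical Blocks hookup for these three components looks like the sketch below; it stubs bot.respond with an echo so it runs standalone, and is not the file's actual wiring:

```python
import gradio as gr

def chat(message, history):
    # Stand-in for bot.respond(message) so the sketch is self-contained.
    reply = f"you said: {message}"
    return "", history + [(message, reply)]

with gr.Blocks(title="HumanLike Chatbot") as demo:
    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(label="You:", placeholder="Type your message here...")
    clear = gr.Button("Clear")

    # Submit clears the textbox and appends the new (user, bot) turn;
    # the Clear button resets the Chatbot component.
    msg.submit(chat, inputs=[msg, chatbot], outputs=[msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()
```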
 