import math
import os
import random
import tempfile
import time

import gradio as gr
from google import genai
from google.genai import types
import google.generativeai as genai_ext
from google.cloud import aiplatform
from transformers import pipeline

creds_json = os.getenv("GCP_CREDS_JSON")
if not creds_json:
    raise Exception("⚠️ Missing GCP_CREDS_JSON secret!")

# Write the service-account JSON to a temp file so GOOGLE_APPLICATION_CREDENTIALS can point at it
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as tmpfile:
    tmpfile.write(creds_json)
    creds_path = tmpfile.name

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = creds_path

# Initialize the Vertex AI SDK ("your-project-id" is a placeholder; use your real project ID)
aiplatform.init(project="your-project-id", location="us-central1")
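# Alternative (sketch, using the standard google-auth API; optional, not what this file does):
# pass credentials to aiplatform.init() directly instead of writing a temp file, e.g.:
#   import json
#   from google.oauth2 import service_account
#   sa_creds = service_account.Credentials.from_service_account_info(json.loads(creds_json))
#   aiplatform.init(project="your-project-id", location="us-central1", credentials=sa_creds)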



# API key for the drafting model; read it from the environment instead of hardcoding it.
# (Env var name "GEMINI_API_KEY" is an example; use whatever secret name you configured.)
apikey = os.getenv("GEMINI_API_KEY")
if not apikey:
    raise Exception("⚠️ Missing GEMINI_API_KEY secret!")

# Configure Gemini API for drafting (free)
genai_ext.configure(api_key=apikey)
llm_model = genai_ext.GenerativeModel('gemini-1.5-flash')

# Real classifiers (Hugging Face pipelines) feeding the E formula below
emotion_classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")  # D: emotion confidence
sentiment_classifier = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")  # M: sentiment
language_detector = pipeline("text-classification", model="papluca/xlm-roberta-base-language-detection")  # C: language fit
bias_classifier = pipeline("text-classification", model="unitary/toxic-bert")  # B: bias / toxicity
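
# Each pipeline call returns a list of {'label', 'score'} dicts; illustrative output (score made up):
#   emotion_classifier("I lost my job today")[0]  ->  {'label': 'sadness', 'score': 0.97}
# The top label drives the bot's mood below and its score feeds D in the E formula.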

# E Formula (extended with I for the bot's own emotion intensity)
def calculate_empathy_score(D, R, M, C, B, O, I, alpha=0.3, beta=0.2, gamma=0.25, epsilon=0.15, delta=0.4, zeta=0.3, iota=0.1):
    """Empathy score E in (0, 1): a logistic over weighted signals, damped by bias and offence.

    D: emotion-detection confidence, R: conversation depth (history length),
    M: sentiment match, C: language fit, B: bias/toxicity, O: offensive-content flag,
    I: bot's emotion intensity. The Greek-letter keyword arguments are the weights.
    """
    inner_sum = epsilon * C + alpha * (D ** 2) + gamma * M + beta * math.log(R + 1) + iota * I
    denominator = math.exp(-inner_sum) + 1
    numerator = (1 - B * delta) * (1 - O * zeta)
    return numerator / denominator
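
# Illustrative sanity check with example inputs (not real traffic), assuming the default weights:
# high detection confidence and sentiment match with low bias/offence should land E around 0.69.
_example_E = calculate_empathy_score(D=0.9, R=3, M=0.95, C=0.8, B=0.1, O=0.0, I=0.7)  # ≈ 0.69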

# Client setup for the tuned model
client = genai.Client(
    vertexai=True,
    project="217758598930",
    location="us-central1",
)

model = "projects/217758598930/locations/us-central1/endpoints/1940344453420023808"

generate_content_config = types.GenerateContentConfig(
    temperature=1,
    top_p=1,
    seed=0,
    max_output_tokens=100,
    safety_settings=[
        types.SafetySetting(category="HARM_CATEGORY_HATE_SPEECH", threshold="BLOCK_NONE"),
        types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold="BLOCK_NONE"),
        types.SafetySetting(category="HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold="BLOCK_NONE"),
        types.SafetySetting(category="HARM_CATEGORY_HARASSMENT", threshold="BLOCK_NONE")
    ],
    thinking_config=types.ThinkingConfig(thinking_budget=-1),
)

class HumanLikeChatbot:
    def __init__(self):
        self.history = []
        self.bot_mood = "neutral"  # Bot's initial mood
        self.irritation_level = 0  # Track irritation buildup

    def respond(self, message):
        try:
            # Clean input
            clean_message = message.lower().strip()
            if len(clean_message) < 3 or not any(c.isalpha() for c in clean_message):
                return "Bhai, yeh kya likha? Clear bol na, main samajh lunga! (E Score: 0.0)"

            # Emotion label from the tuned model (streamed), e.g. "sadness".
            # Note: base_resp is collected here, but the classifier below is what drives D / user_emotion.
            contents = [
                types.Content(
                    role="user",
                    parts=[types.Part.from_text(text=clean_message)]
                ),
            ]
            base_resp = ""
            for chunk in client.models.generate_content_stream(
                model=model,
                contents=contents,
                config=generate_content_config,
            ):
                if chunk.text:  # some streamed chunks may carry no text
                    base_resp += chunk.text.lower()

            # Real D from emotion classifier
            emotion_result = emotion_classifier(clean_message)[0]
            D = emotion_result['score']  # Confidence
            user_emotion = emotion_result['label']

            # Update bot's mood and irritation
            if user_emotion in ['anger', 'disgust'] or any(word in clean_message for word in ['bakwaas', 'stupid', 'idiot']):
                self.irritation_level += 0.2  # Build irritation
                if self.irritation_level > 0.5:
                    self.bot_mood = "irritated"
                else:
                    self.bot_mood = "angry"
                I = 0.8 + self.irritation_level  # High intensity for anger/irritation
            elif user_emotion in ['sadness', 'disappointment']:
                self.bot_mood = "emotional"
                I = 0.7
                self.irritation_level = max(0, self.irritation_level - 0.1)  # Reduce irritation
            elif user_emotion == 'joy':
                self.bot_mood = "happy"
                I = 0.9
                self.irritation_level = 0  # Reset irritation
            else:
                self.bot_mood = "neutral"
                I = 0.5
                self.irritation_level = max(0, self.irritation_level - 0.1)

            # Draft response from LLM based on bot's mood
            prompt = f"""User said: "{clean_message}" | User Mood: {user_emotion} | Bot Mood: {self.bot_mood} | History: {self.history[-2:]} → Reply as a Hinglish chatbot in a {self.bot_mood} mood, human-like, no tips or instructions:"""
            llm_response = llm_model.generate_content(prompt)
            draft = llm_response.text.strip()

            # Fallback responses
            fallback_responses = {
                'sadness': ["Bhai, dil se dukh hua, kya hua bata na?", "Sad vibes pakdi, I'm here for you, bro."],
                'disappointment': ["Arre, yeh toh bura laga, kya hua share kar."],
                'joy': ["Waah bhai, khushi ki baat! Congrats, aur bata!"],
                'anger': ["Bhai, gussa thanda kar, kya ho gaya bol na!"],
                'neutral': ["Cool, kya chal raha life mein? Kuch fun bata."]
            }
            if not draft or len(draft) < 10:
                draft = random.choice(fallback_responses.get(user_emotion, fallback_responses['neutral']))

            # Remaining E-formula inputs: real classifier outputs plus simple heuristics
            R = len(self.history)
            M = 0.95 if sentiment_classifier(clean_message)[0]['label'] == 'POSITIVE' else 0.5
            lang = language_detector(clean_message)[0]['label']
            C = 0.8 if lang in ['hi', 'en'] else 0.6
            bias = bias_classifier(draft)[0]['score']
            B = bias if bias > 0.5 else 0.1
            O = 0.2 if any(word in clean_message for word in ['kill', 'hate']) else 0.0

            score = calculate_empathy_score(D, R, M, C, B, O, I)

            full_resp = draft + f" (User Emotion: {user_emotion}, My Mood: {self.bot_mood})"

            # if R > 0:
            #     full_resp += f" Yaad hai pehle {self.history[-1][:20]} pe feel kiya tha?"

            time.sleep(random.uniform(1, 2.5))  # Pause for realism

            self.history.append(clean_message)
            return full_resp + f" (E Score: {score:.2f})"
        except Exception as e:
            return f"Error aaya bhai: {str(e)}. Endpoint ya auth check kar."

# Gradio app
def chat(message, history):
    if history is None:
        history = []
    response = bot.respond(message)
    history.append((message, response))
    return "", history

bot = HumanLikeChatbot()

with gr.Blocks(title="HumanLike Chatbot") as demo:
    gr.Markdown("<h1 style='text-align: center;'>HumanLike Chatbot with Emotions and E Score</h1>")
    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(label="Tu:", placeholder="Type your message here...")
    clear = gr.Button("Clear")

    msg.submit(chat, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch(share=True)