File size: 6,992 Bytes
c885400
f4dcd5d
d581a83
 
 
e4fe643
d581a83
 
c885400
0ce1d0f
c885400
d581a83
 
 
 
 
 
 
c885400
d581a83
 
 
c885400
d581a83
 
 
 
b5fc4a7
 
0ce1d0f
b5fc4a7
0ce1d0f
d581a83
 
0ce1d0f
d581a83
 
 
 
 
 
b5fc4a7
d581a83
 
c885400
 
 
 
 
d581a83
 
c885400
 
d581a83
c885400
 
0ce1d0f
d581a83
 
 
 
 
 
c885400
d581a83
 
 
 
f4dcd5d
b5fc4a7
0ce1d0f
d581a83
 
b5fc4a7
 
 
 
 
 
 
 
 
 
 
 
f4dcd5d
b5fc4a7
c885400
b5fc4a7
f4dcd5d
 
b5fc4a7
 
f4dcd5d
 
b5fc4a7
 
0ce1d0f
b5fc4a7
 
f4dcd5d
 
b5fc4a7
 
 
0ce1d0f
f4dcd5d
b5fc4a7
f4dcd5d
b5fc4a7
0ce1d0f
 
f4dcd5d
b5fc4a7
 
 
f4dcd5d
0ce1d0f
f4dcd5d
 
b5fc4a7
 
0ce1d0f
b5fc4a7
 
0ce1d0f
 
b5fc4a7
 
 
0ce1d0f
 
 
 
b5fc4a7
 
0ce1d0f
 
 
 
 
 
 
 
 
 
f4dcd5d
 
b5fc4a7
f4dcd5d
 
 
 
 
 
b5fc4a7
 
f4dcd5d
 
 
 
 
b5fc4a7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0ce1d0f
 
b5fc4a7
 
 
 
f4dcd5d
 
0ce1d0f
b5fc4a7
0ce1d0f
 
b5fc4a7
 
 
 
 
0ce1d0f
 
b5fc4a7
0ce1d0f
 
 
 
 
 
b5fc4a7
0ce1d0f
b5fc4a7
0ce1d0f
 
f4dcd5d
 
b5fc4a7
f4dcd5d
d581a83
b5fc4a7
 
f4dcd5d
c885400
f4dcd5d
c885400
f4dcd5d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
import gradio as gr
import pandas as pd
import plotly.express as px
import shutil
import os
import torch
from huggingface_hub import hf_hub_download
from importlib import import_module

# Load inference.py and model
# Fetch the model repo's bundled inference helper from the Hugging Face Hub;
# importing it below is what loads the tokenizer/model into memory.
repo_id = "logasanjeev/goemotions-bert"
local_file = hf_hub_download(repo_id=repo_id, filename="inference.py")
print("Downloaded inference.py successfully!")

# Copy the cached download into the working directory so that
# import_module("inference") can resolve it from sys.path.
current_dir = os.getcwd()
destination = os.path.join(current_dir, "inference.py")
shutil.copy(local_file, destination)
print("Copied inference.py to current directory!")

inference_module = import_module("inference")
predict_emotions = inference_module.predict_emotions
print("Imported predict_emotions successfully!")

# Warm-up call: surfaces any load-time failure at startup and avoids a slow
# first real request. The returned values are deliberately discarded.
_, _ = predict_emotions("dummy text")
emotion_labels = inference_module.EMOTION_LABELS  # label names, indexed in step with THRESHOLDS
default_thresholds = inference_module.THRESHOLDS  # per-label default confidence thresholds

# Prediction function (simplified, no export)
def predict_emotions_with_details(text, confidence_threshold=0.0):
    """Predict emotions for *text* and build the three Gradio outputs.

    Args:
        text: Raw user input string.
        confidence_threshold: Extra minimum confidence from the UI slider
            (0.0-0.9). Applied on top of each emotion's per-label default
            threshold -- the effective cutoff is the max of the two.

    Returns:
        A 3-tuple ``(processed_text, thresholded_output, fig)``:
        the preprocessed text, a newline-joined ``"emotion: score"`` string
        (or a "no emotions" message), and a Plotly bar chart or ``None``.
    """
    if not text.strip():
        return "Please enter some text.", "", None

    # predict_emotions() already runs tokenization + the model forward pass
    # and returns formatted "emotion: confidence" lines. (A second, redundant
    # forward pass that previously lived here was removed: its logits were
    # never used, so it only doubled inference cost per request.)
    predictions_str, processed_text = predict_emotions(text)

    # Parse the formatted prediction lines back into (label, float) pairs.
    predictions = []
    if predictions_str != "No emotions predicted.":
        for line in predictions_str.split("\n"):
            # rsplit guards against a label that itself contains ": ".
            emotion, confidence = line.rsplit(": ", 1)
            predictions.append((emotion, float(confidence)))

    # Keep only predictions clearing both the per-label default threshold
    # and the user-selected slider threshold.
    filtered_predictions = []
    for emotion, confidence in predictions:
        thresh = default_thresholds[emotion_labels.index(emotion)]
        adjusted_thresh = max(thresh, confidence_threshold)
        if confidence >= adjusted_thresh:
            filtered_predictions.append((emotion, confidence))

    if not filtered_predictions:
        thresholded_output = "No emotions predicted above thresholds."
    else:
        thresholded_output = "\n".join(
            f"{emotion}: {confidence:.4f}" for emotion, confidence in filtered_predictions
        )

    # Bar chart of the surviving predictions; None when nothing passed so
    # the Plot component renders empty.
    fig = None
    if filtered_predictions:
        df = pd.DataFrame(filtered_predictions, columns=["Emotion", "Confidence"])
        fig = px.bar(
            df,
            x="Emotion",
            y="Confidence",
            color="Emotion",
            text="Confidence",
            title="Emotion Confidence Levels",
            height=300,
            color_discrete_sequence=px.colors.qualitative.Pastel
        )
        fig.update_traces(texttemplate='%{text:.2f}', textposition='auto')
        fig.update_layout(showlegend=False, margin=dict(t=40, b=40), xaxis_title="", yaxis_title="Confidence")

    return processed_text, thresholded_output, fig

# Simplified CSS injected into the Gradio Blocks app below.
# Styles the panels/buttons plus the elements targeted by elem_id in the UI
# ('title', 'description', 'examples-title') and the footer HTML block.
custom_css = """
body {
    font-family: 'Arial', sans-serif;
    background-color: #f9f9f9;
}
.gr-panel {
    border-radius: 8px;
    box-shadow: 0 2px 10px rgba(0,0,0,0.05);
    background: white;
    padding: 15px;
    margin-bottom: 15px;
}
.gr-button {
    border-radius: 6px;
    padding: 10px 20px;
    font-weight: 500;
    background: #4a90e2;
    color: white;
    transition: background 0.3s ease;
}
.gr-button:hover {
    background: #357abd;
}
#title {
    font-size: 2.2em;
    font-weight: 600;
    color: #333;
    text-align: center;
    margin-bottom: 10px;
}
#description {
    font-size: 1.1em;
    color: #666;
    text-align: center;
    max-width: 600px;
    margin: 0 auto 20px auto;
}
#examples-title {
    font-size: 1.3em;
    font-weight: 500;
    color: #333;
    margin-bottom: 10px;
}
footer {
    text-align: center;
    margin-top: 30px;
    padding: 15px;
    font-size: 0.9em;
    color: #666;
}
footer a {
    color: #4a90e2;
    text-decoration: none;
}
footer a:hover {
    text-decoration: underline;
}
"""

# Gradio Blocks UI (Simplified)
# Declarative UI layout: header, input group, output group, examples, footer.
# Component declaration order inside the context manager determines on-page order.
with gr.Blocks(css=custom_css) as demo:
    # Header
    gr.Markdown("<div id='title'>GoEmotions BERT Classifier</div>", elem_id="title")
    gr.Markdown(
        """
        <div id='description'>
        Predict emotions from text using a fine-tuned BERT model. 
        Enter your text below to see the detected emotions and their confidence scores.
        </div>
        """,
        elem_id="description"
    )
    
    # Input Section: free text, optional extra confidence cutoff, submit button.
    with gr.Group():
        text_input = gr.Textbox(
            label="Enter Your Text",
            placeholder="Type something like 'I’m just chilling today'...",
            lines=2,
            show_label=False
        )
        confidence_slider = gr.Slider(
            minimum=0.0,
            maximum=0.9,
            value=0.0,
            step=0.05,
            label="Minimum Confidence Threshold",
            info="Filter predictions below this confidence level (default thresholds still apply)"
        )
        submit_btn = gr.Button("Predict Emotions")
    
    # Output Section: mirrors the 3-tuple returned by predict_emotions_with_details.
    with gr.Group():
        processed_text_output = gr.Textbox(label="Preprocessed Text", lines=1, interactive=False)
        thresholded_output = gr.Textbox(label="Predicted Emotions", lines=3, interactive=False)
        output_plot = gr.Plot(label="Emotion Confidence Chart")
    
    # Example carousel
    # Each example is [text, description]; only the text is fed to text_input.
    with gr.Group():
        gr.Markdown("<div id='examples-title'>Try These Examples</div>", elem_id="examples-title")
        examples = gr.Examples(
            examples=[
                ["I’m thrilled to win this award! πŸ˜„", "Joy Example"],
                ["This is so frustrating, nothing works. 😣", "Annoyance Example"],
                ["I feel so sorry for what happened. 😒", "Sadness Example"],
                ["What a beautiful day to be alive! 🌞", "Admiration Example"],
                ["Feeling nervous about the exam tomorrow πŸ˜“ u/student r/study", "Nervousness Example"]
            ],
            inputs=[text_input],
            label=""
        )
    
    # Footer
    gr.HTML(
        """
        <footer>
            Built by logasanjeev | 
            <a href="https://huggingface.co/logasanjeev/goemotions-bert">Model Card</a> | 
            <a href="https://www.kaggle.com/code/ravindranlogasanjeev/evaluation-logasanjeev-goemotions-bert/notebook">Kaggle Notebook</a>
        </footer>
        """
    )
    
    # Bind predictions: click wires (text, slider) -> the three output components.
    submit_btn.click(
        fn=predict_emotions_with_details,
        inputs=[text_input, confidence_slider],
        outputs=[processed_text_output, thresholded_output, output_plot]
    )

# Launch
# Start the Gradio server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()