File size: 4,420 Bytes
c962072
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7b3cf2d
 
 
 
 
c962072
7b3cf2d
 
 
 
 
c962072
 
7b3cf2d
 
 
c962072
7b3cf2d
c962072
 
7b3cf2d
 
 
 
 
 
 
 
 
c962072
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline


class TwitterEmotionClassifier:
    def __init__(self, model_name: str, model_type: str):
        self.is_gpu = False
        self.model_type = model_type
        device = torch.device("cuda") if self.is_gpu else torch.device("cpu")
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model.to(device)
        model.eval()
        self.bertweet = pipeline(
            "text-classification",
            model=model,
            tokenizer=tokenizer,
            device=self.is_gpu - 1,
        )
        self.deberta = None
        self.emotions = {
            "LABEL_0": "sadness",
            "LABEL_1": "joy",
            "LABEL_2": "love",
            "LABEL_3": "anger",
            "LABEL_4": "fear",
            "LABEL_5": "surprise",
        }

    def get_model(self, model_type: str):
        if self.model_type == "bertweet" and model_type == self.model_type:
            return self.bertweet
        elif model_type == "deberta":
            if self.deberta:
                return self.deberta
            model = AutoModelForSequenceClassification.from_pretrained(
                "Emanuel/twitter-emotion-deberta-v3-base"
            )
            tokenizer = AutoTokenizer.from_pretrained(
                "Emanuel/twitter-emotion-deberta-v3-base"
            )
            self.deberta = pipeline(
                "text-classification",
                model=model,
                tokenizer=tokenizer,
                device=self.is_gpu - 1,
            )
            return self.deberta

    def predict(self, twitter: str, model_type: str):
        classifier = self.get_model(model_type)
        preds = classifier(twitter, return_all_scores=True)
        if preds:
            pred = preds[0]
            res = {
                "Sadness ๐Ÿ˜ข": pred[0]["score"],
                "Joy ๐Ÿ˜‚": pred[1]["score"],
                "Love ๐Ÿ’›": pred[2]["score"],
                "Anger ๐Ÿ˜ ": pred[3]["score"],
                "Fear ๐Ÿ˜ฑ": pred[4]["score"],
                "Surprise ๐Ÿ˜ฎ": pred[5]["score"],
            }
            return res
        return None


def main():

    model = TwitterEmotionClassifier("Emanuel/bertweet-emotion-base", "bertweet")
    interFace = gr.Interface(
        fn=model.predict,
        inputs=[
            gr.inputs.Textbox(
                placeholder="What's happenning?", label="Tweet content", lines=5
            ),
            gr.inputs.Radio(["bertweet", "deberta"], label="Model"),
        ],
        outputs=gr.outputs.Label(num_top_classes=6, label="Emotions of this tweet is "),
        verbose=True,
        examples=[
            ["Tesla Bot is truly amazing. It's the early steps of a revolution in the role that AI & robots play in human civilization.

What the Tesla team was been able to accomplish in the last few months is just incredible.

As someone who loves AI and robotics, I'm inspired beyond words.", "bertweet"],
            [
                "I got food poisoning. It sucks ๐Ÿฅต but it makes me appreciate: 
1. the days when I'm not sick and
2. just how damn incredible the human body is at fighting off all the things that try to kill it.

Biology is awesome. Life is awesome.",
                "bertweet",
            ],
            ["I'm adding human-created captions to many podcasts soon. (It's expensive ๐Ÿ˜”) These identify the speaker, are timed to the audio, and so make for good training data.

When you and I do a podcast, we too will become immortalized as training data.", "bertweet"],
            [
                "We live inside a simulation and are ourselves creating progressively more realistic and interesting simulations. Existence is a recursive simulation generator.",
                "bertweet",
            ],
            ["Here's my conversation with Will Sasso, one of the funniest people on the planet and someone who I've been a fan of for over 20 years. https://youtube.com/watch?v=xewD1apJNhw

PS: His 
@Twitter
 account 
@WillSasso
 got hacked yesterday. 
@TwitterSupport
 please help him out!", "deberta"],
        ],
        title="Emotion classification ๐Ÿค–",
        description="",
        theme="huggingface",
    )
    interFace.launch()


if __name__ == "__main__":
    main()