|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:12:41.589025Z" |
|
}, |
|
"title": "DD-TIG at Constraint@ACL2022: Multimodal Understanding and Reasoning for Role Labeling of Entities in Hateful Memes", |
|
"authors": [ |
|
{ |
|
"first": "Ziming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Xiaolong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The memes serve as an important tool in online communication, whereas some hateful memes endanger cyberspace by attacking certain people or subjects. Recent studies address hateful memes detection while further understanding of relationships of entities in memes remains unexplored. This paper presents our work at the Constraint@ACL2022 Shared Task: Hero, Villain and Victim: Dissecting harmful memes for semantic role labelling of entities. In particular, we propose our approach utilizing transformerbased multimodal models through a visual commonsense reasoning (VCR) method with data augmentation, continual pretraining, loss reweighting, and ensemble learning. We describe the models used, the ways of preprocessing and experiments implementation. As a result, our best model achieves the Macro F1-score of 54.707 on the test set of this shared task 1 .", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The memes serve as an important tool in online communication, whereas some hateful memes endanger cyberspace by attacking certain people or subjects. Recent studies address hateful memes detection while further understanding of relationships of entities in memes remains unexplored. This paper presents our work at the Constraint@ACL2022 Shared Task: Hero, Villain and Victim: Dissecting harmful memes for semantic role labelling of entities. In particular, we propose our approach utilizing transformerbased multimodal models through a visual commonsense reasoning (VCR) method with data augmentation, continual pretraining, loss reweighting, and ensemble learning. We describe the models used, the ways of preprocessing and experiments implementation. As a result, our best model achieves the Macro F1-score of 54.707 on the test set of this shared task 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Memes are getting popular as a communication tool on social media platforms for expressions of opinions and emotions, conveying a subtle message through multimodal information from both images and texts. However, memes are increasingly abused to spread hate instigate social unrest and therefore seem to be a new form of expression of hate speech on online platforms (Bhattacharya, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 387, |
|
"text": "(Bhattacharya, 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Automatic hateful memes detection is difficult since it primarily requires context and external knowledge to understand online speech, which sometimes can be very short and contains nuanced meaning (Pramanick et al., 2021) . A new type of challenging task has been introduced by The Hateful Memes Challenge (Kiela et al., 2020) proposed by Facebook AI to leverage machine learning models to address hateful memes detection problems, which can only be solved by joint reasoning and un-derstanding of visual and textual information (Zhu, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 222, |
|
"text": "(Pramanick et al., 2021)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 327, |
|
"text": "(Kiela et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 530, |
|
"end": 541, |
|
"text": "(Zhu, 2020)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In previous studies, researchers focus on binary classification problems, labelling a meme as hateful or non-hateful based on image and text features (Afridi et al., 2020) . Moreover, the relationships of entities in memes remain unexplored, and the task of role labelling of entities in hateful memes can be more sophisticated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 171, |
|
"text": "(Afridi et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The Constraint@ACL2022 Shared Task: Hero, Villain and Victim: Dissecting harmful memes for semantic role labelling of entities offers us a perspective on this issue (Sharma et al., 2022) . This task aims to promote the detection and classification of glorified, vilified or victimized entities within a meme. The shared dataset concerns memes from US Politics domains and Covid-19. Covid-19-related online hostile content especially demands to be detected as early as possible after their appearance on social media.", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 186, |
|
"text": "(Sharma et al., 2022)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present our work on this task. Specifically, mainstream multimodal models of transformer-based architecture are applied through a visual commonsense reasoning (VCR) method, with the leverage of continual pretraining to fit models with our dataset. Then, data augmentation and loss re-weighting are implemented to improve the performance of models. The predictions from variant models are combined in a machine learning method to produce final results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Hateful memes understanding and reasoning is a vision and language task. Current state-of-theart Vision-Language machine learning models are based on the transformer architecture (Vaswani et al., 2017) . Multimodal models learn the joint visual and textual representations through selfsupervised learning that utilize large-scale unlabelled data to conduct auxiliary tasks (Chen et al., 2022) , including masked language modelling based on randomly-masked sub-words, masked region prediction and image-text matching. Among these models, there are two prevalent approaches: singlestream and dual-stream (Du et al., 2022) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 201, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 392, |
|
"text": "(Chen et al., 2022)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 602, |
|
"end": 619, |
|
"text": "(Du et al., 2022)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{

"text": "In the single-stream architecture, the representations of the two modalities are learned by a single transformer encoder. Specifically, the text embeddings $L = \\{w_1, w_2, w_3, \\dots, w_l\\}$ and the image features $V = \\{o_1, o_2, o_3, \\dots, o_k\\}$ are concatenated as $X = \\{L \\parallel V\\}$, augmented with special embeddings that indicate position and modality, and fed into a transformer-based encoder.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "2"

},
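To make the single-stream input construction above concrete, here is a minimal PyTorch sketch that concatenates text embeddings and projected region features and adds modality and position embeddings. The hidden size, vocabulary size, sequence lengths, and layer count are illustrative assumptions, not the configuration of any specific cited model.

```python
import torch
import torch.nn as nn

# Single-stream input construction (illustrative sketch, assumed dimensions).
hidden = 768
word_emb = nn.Embedding(30522, hidden)          # text token embeddings
img_proj = nn.Linear(2048, hidden)              # project 2048-d region features
type_emb = nn.Embedding(2, hidden)              # 0 = text modality, 1 = image modality
pos_emb = nn.Embedding(512, hidden)             # position embeddings

token_ids = torch.randint(0, 30522, (1, 32))    # L: l text tokens
regions = torch.randn(1, 50, 2048)              # V: k detected region features

L = word_emb(token_ids)                         # (1, l, hidden)
V = img_proj(regions)                           # (1, k, hidden)
X = torch.cat([L, V], dim=1)                    # X = {L || V}

types = torch.cat([torch.zeros(1, 32, dtype=torch.long),
                   torch.ones(1, 50, dtype=torch.long)], dim=1)
positions = torch.arange(X.size(1)).unsqueeze(0)
X = X + type_emb(types) + pos_emb(positions)    # add modality and position embeddings

encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(hidden, nhead=12, batch_first=True), num_layers=2)
joint = encoder(X)                              # joint visual-textual representation
```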
|
{ |
|
"text": "There are many implementations in singlestream models, such as VisualBERT (Li et al., 2019) , UNITER (Chen et al., 2020) , OSCAR .", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 91, |
|
"text": "(Li et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 101, |
|
"end": 120, |
|
"text": "(Chen et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In dual-stream models, the image and text features are first sent to two independent encoders. Then two features are separately fed into crossmodal transformer layers, where the query vectors are from one modality while the key and value vectors are from another. They are responsible for exchanging the information and aligning the semantics between the two modalities L and V . The formula of cross-modal transformer layers is represented as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "L m i = CrossAttL\u2212V (L m\u22121 i , {V m\u22121 1 , \u2022 \u2022 \u2022 , V m\u22121 k }) (1) V m i = CrossAttV \u2212L(V m\u22121 i , {L m\u22121 1 , \u2022 \u2022 \u2022 , L m\u22121 l }) (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where m is the m th cross-attention layer, k is the number of visual tokens, and l is the length of text tokens. Following each cross-attention layer, there is also a layer computing the self-attention of each modality independently. Features are combined at the end of the model. Several dual-stream models have been proposed in former studies, such as LXMERT (Tan and Bansal, 2019) , ERNIE-Vil , De-VLBERT , VilBERT ,", |
|
"cite_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 383, |
|
"text": "(Tan and Bansal, 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
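The cross-modal attention in Eqs. (1)-(2) can be sketched with PyTorch's nn.MultiheadAttention, where the query comes from one modality and the keys/values from the other. The hidden size, head count, and token counts below are assumed values for illustration.

```python
import torch
import torch.nn as nn

# One cross-modal attention step per Eqs. (1)-(2) (illustrative sketch).
hidden, heads = 768, 12
cross_att_l2v = nn.MultiheadAttention(hidden, heads, batch_first=True)
cross_att_v2l = nn.MultiheadAttention(hidden, heads, batch_first=True)

L_prev = torch.randn(1, 32, hidden)   # text states from layer m-1 (l = 32)
V_prev = torch.randn(1, 50, hidden)   # visual states from layer m-1 (k = 50)

# Eq. (1): text queries attend over visual keys/values.
L_m, _ = cross_att_l2v(query=L_prev, key=V_prev, value=V_prev)
# Eq. (2): visual queries attend over textual keys/values.
V_m, _ = cross_att_v2l(query=V_prev, key=L_prev, value=L_prev)
```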
|
{ |
|
"text": "Given the image and transcribed text of a meme, the role of a certain entity in this meme will be determined as hero, villain, victim or other, which can be interpreted as a multi-class classification task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Definition", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Input: a meme image V , text transcriptions L, a entity E", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Definition", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Output: y \u2208 {hero, villain, victim, other}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Definition", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The official evaluation measure for the shared task is the macro-F1 score for the multi-class classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Definition", |
|
"sec_num": "3" |
|
}, |
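Since the official measure is the Macro F1-score over the four roles, a minimal evaluation sketch with scikit-learn looks like the following; the labels are toy values rather than shared-task data, and macro averaging weights each role equally regardless of how rare it is.

```python
from sklearn.metrics import f1_score

ROLES = ["hero", "villain", "victim", "other"]

# Toy predictions against gold labels; macro-F1 averages per-class F1 scores
# with equal weight, so rare roles count as much as the dominant "other" class.
gold = ["other", "villain", "other", "victim", "hero", "other"]
pred = ["other", "villain", "other", "other", "hero", "other"]

macro_f1 = f1_score(gold, pred, labels=ROLES, average="macro")
print(f"Macro F1: {macro_f1:.3f}")
```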
|
{ |
|
"text": "The dataset provided in this task is a combination of memes from Covid-19 and US Politics domain. Every sample in the train and validation set contains an image, a transcription of texts and a list of entities with annotated labels. The shared task organizers provide the definitions for each class 2 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Hero: the entity is presented in a positive light, glorified for its actions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Villain: the entity is portrayed negatively, e.g., in an association with adverse traits like wickedness, cruelty, hypocrisy, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Victim: the entity is portrayed as suffering the negative impact of someone else's actions or conveyed implicitly within the meme.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Other: the entity is not a hero, a villain, or a victim.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We present the distribution of entities' roles in Table 1 . There is a considerable imbalance in the distribution of entities' roles where the \"other\" class accounts for more than 80 percent of the whole dataset. Meanwhile, the distribution of entities' frequency also shows a disparity. We present some most frequent entities with their roles distribution in Figure 1 . For visual feature preprocessing, we use the pretrained Mask-RCNN model provided in the detectron2 framework 3 to obtain the object detection based region feature embedding", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 57, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 368, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "V = [o 1 , o 2 , \u2022 \u2022 \u2022 , o k ] of images.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Detectron2 is proposed by Facebook AI with state-of-the-art detection and segmentation algorithms. Specifically, 50 boxes of 2048 dimensions region-based image features are extracted for every meme. For the text transcriptions, we make the content lower-case and remove punctuation and stopwords with NLTK library (Loper and Bird, 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 336, |
|
"text": "(Loper and Bird, 2002)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Composition", |
|
"sec_num": "4" |
|
}, |
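A minimal sketch of the text cleaning described above, using the standard NLTK stopword list; the helper name clean_ocr_text is hypothetical and not taken from the paper's code.

```python
import string
import nltk
from nltk.corpus import stopwords

# Lower-case the transcription, strip punctuation, and drop stopwords.
nltk.download("stopwords", quiet=True)
STOP = set(stopwords.words("english"))

def clean_ocr_text(text: str) -> str:
    text = text.lower().translate(str.maketrans("", "", string.punctuation))
    return " ".join(tok for tok in text.split() if tok not in STOP)

print(clean_ocr_text("WHO is the REAL villain here?!"))  # -> "real villain"
```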
|
{ |
|
"text": "Four mainstream multimodal models of VL transformer architectures are applied in this work, namely: VisualBERT, UNITER, OSCAR, and ERNIE-Vil.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vision and Language Models", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "VisualBERT (Li et al., 2019) , known as the first image-text pre-training model, uses the visual features extracted by Faster R-CNN, concatenates the visual features and textual embeddings, and then feeds the concatenated features to a single transformer initialled by BERT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 28, |
|
"text": "(Li et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vision and Language Models", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "UNITER (Chen et al., 2020) learns contextualized joint representation of both visual and textual OSCAR , instead of simply using image-text pair, adds object tags detected from the image and represent the image-text pair as a <Word, Tag, Image> triple to help the fusion encoder better align different modalities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 26, |
|
"text": "(Chen et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vision and Language Models", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "ERNIE-Vil , as a typical dualstream model, enhances the model with the application of scene utilizing scene graphs of visual scenes, which can learn the joint representations characterizing the alignments of the detailed semantics across vision and language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vision and Language Models", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For domain adaptation, we carry out continual pretraining on our dataset to reduce the distribution gap between the pretraining dataset and our memes dataset. Masked Language Modeling (MLM) pretraining task is taken on pretraining VisualBERTlarge, UNITER-large, and OSCAR-large model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vision and Language Models", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Visual Commonsense Reasoning (VCR) focuses on a higher-order cognitive and commonsense understanding of relationships of the visual components in the image (Zellers et al., 2019) . Former studies take a question, answer choices and an image into models to predict the right answer as a multi-class classification problem (Su et al., 2019) . We modify this method's input and output format to conduct our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 178, |
|
"text": "(Zellers et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 338, |
|
"text": "(Su et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VCR Implementation", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "As can be seen in Figure 2 , we concatenate the given entity and text tokens as the textual input with a separate token [SEP ], while different segment embedding will be added respectively to indicate their states. Then, textual input and visual will be concatenated in the single-stream model like VisualBERT. They would be separately sent into encoders in the dual-stream model like ERNIE-Vil. In the single-stream model, the final output feature of [CLS] element is taken. In the dualstream model, textual and visual features are fused through sum or multiplication. Then, features are fed to a linear layer with softmax to predict the role of the given entity.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 26, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "VCR Implementation", |
|
"sec_num": "5.3" |
|
}, |
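The following sketch illustrates the entity-conditioned textual input and the 4-way classification head on the [CLS] feature for the single-stream variant. The BERT tokenizer, the example entity and meme text, and the random stand-in for the encoder's [CLS] output are illustrative assumptions, and the visual stream is omitted for brevity.

```python
import torch
import torch.nn as nn
from transformers import BertTokenizer

# Build "[CLS] entity [SEP] meme text [SEP]" and classify the pooled feature.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
ROLES = ["hero", "villain", "victim", "other"]

entity = "some entity"                      # hypothetical example input
meme_text = "ocr transcription of the meme"
enc = tokenizer(entity, meme_text, return_tensors="pt")
print(enc["token_type_ids"])                # segment ids distinguish entity vs. text

hidden = 768
cls_feature = torch.randn(1, hidden)        # stand-in for the encoder's [CLS] output
classifier = nn.Linear(hidden, len(ROLES))
probs = torch.softmax(classifier(cls_feature), dim=-1)
print(ROLES[probs.argmax(-1).item()])
```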
|
{ |
|
"text": "The final objective is to minimize the crossentropy (CE) loss between the predicted distribution and the targeted role category, which can be formally defined as: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VCR Implementation", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "p(x) = exp(g(x)i) N j=1 exp(g(x)j) (3) L = \u2212 logp(x) \u2022 y (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VCR Implementation", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "where g(x) is the output of the FC layer and N is the number of labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VCR Implementation", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "A loss re-weighting strategy has been applied in our experiment since the \"other\" class accounts for the overwhelming majority of entries in samples, while hero, villain, and victim roles shall be stressed. Thus, our new loss function is defined as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Re-weighting", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "L = \u2212 \u03b1 \u2022 log p(x) (5) \u03b1 = \u03b1 neg y = other \u03b1 pos else (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Re-weighting", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "where \u03b1 neg and \u03b1 pos are the weights for the \"other\" role and \"non-other\" role respectively as \u03b1 neg < \u03b1 pos and \u03b1 neg + \u03b1 pos = 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Re-weighting", |
|
"sec_num": "5.4" |
|
}, |
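Equations (3)-(6) amount to a class-weighted cross-entropy. A minimal PyTorch sketch follows; the specific values of alpha_pos and alpha_neg are assumptions, since the paper only states alpha_neg < alpha_pos and alpha_neg + alpha_pos = 1.

```python
import torch
import torch.nn.functional as F

# Re-weighted cross-entropy from Eqs. (5)-(6): down-weight the dominant "other"
# class. The weight values below are illustrative assumptions.
ROLES = ["hero", "villain", "victim", "other"]
alpha_pos, alpha_neg = 0.7, 0.3
class_weights = torch.tensor([alpha_pos, alpha_pos, alpha_pos, alpha_neg])

logits = torch.randn(8, len(ROLES))              # g(x) for a batch of 8 entities
targets = torch.randint(0, len(ROLES), (8,))     # gold role indices
loss = F.cross_entropy(logits, targets, weight=class_weights)
```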
|
{ |
|
"text": "We adopt the data augmentation with the backtranslation strategy. Specifically, the provided text of each meme is paraphrased with Baidu translation API: English-Chinese-English and English-French-English. Diverse sentences are produced for each meme to enrich our dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "5.5" |
|
}, |
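A sketch of the back-translation augmentation described above; translate() is a hypothetical placeholder for the Baidu translation API call, whose exact interface is not part of this paper.

```python
# Back-translation augmentation sketch. `translate(text, src, dst)` is a
# hypothetical stand-in for the translation service call used in the paper.
def translate(text: str, src: str, dst: str) -> str:
    raise NotImplementedError("call the translation service here")

def back_translate(text: str, pivot: str) -> str:
    # English -> pivot language -> English paraphrase.
    return translate(translate(text, "en", pivot), pivot, "en")

def augment(meme_text: str) -> list[str]:
    # Original text plus Chinese- and French-pivot paraphrases.
    return [meme_text,
            back_translate(meme_text, "zh"),
            back_translate(meme_text, "fr")]
```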
|
{ |
|
"text": "We train these four base models with different seeds to produce a total of 16 models. The predicted scores on validation set are generated by all models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Learning", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "Then, a SVM model is trained with the predictions and true labels. In the testing phase, the predictions on the test set are fed into the trained SVM model to make final ensemble predictions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Learning", |
|
"sec_num": "5.6" |
|
}, |
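The ensemble is thus a stacking scheme: the per-class scores of the 16 base models are concatenated per sample, and an SVM is fit on the validation predictions and then applied to the test predictions. A minimal scikit-learn sketch with random stand-in arrays (the shapes are assumptions) is shown below.

```python
import numpy as np
from sklearn.svm import SVC

# Stacking ensemble sketch: concatenate each base model's predicted class
# scores and train an SVM meta-classifier on the validation set.
n_val, n_test, n_models, n_classes = 1000, 500, 16, 4

val_scores = np.random.rand(n_val, n_models * n_classes)   # stacked model outputs
val_labels = np.random.randint(0, n_classes, n_val)        # gold roles
test_scores = np.random.rand(n_test, n_models * n_classes)

meta = SVC(kernel="rbf")
meta.fit(val_scores, val_labels)
final_pred = meta.predict(test_scores)                     # ensemble predictions
```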
|
{ |
|
"text": "For continual pretraining on VisualBERT, OSCAR, and UNITER, each word in the text transcriptions is randomly masked at a probability of 15 percent. The final output feature corresponding to the masked word is fed into a classifier over the whole vocabulary, driven by softmax cross-entropy loss.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.7" |
|
}, |
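A minimal sketch of the 15 percent random masking used for continual MLM pretraining; the [MASK] token id and the ignore index follow common BERT/PyTorch conventions and are assumptions rather than details reported in the paper.

```python
import torch

# 15% random masking for MLM (sketch): masked positions become prediction
# targets, all other positions are ignored by the loss (-100 for PyTorch CE).
MASK_ID, IGNORE = 103, -100   # 103 is BERT's [MASK] id; assumed convention

def mask_tokens(input_ids: torch.Tensor, mask_prob: float = 0.15):
    labels = input_ids.clone()
    mask = torch.rand(input_ids.shape) < mask_prob
    labels[~mask] = IGNORE            # only masked tokens contribute to the loss
    masked_ids = input_ids.clone()
    masked_ids[mask] = MASK_ID        # replace selected tokens with [MASK]
    return masked_ids, labels
```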
|
{ |
|
"text": "We finetune all models with a focal loss (Lin et al., 2017 ) and a batch size of 16. The max sequence length is set at 256. The Adam optimizer is used with a learning rate of 1e-5 and 10 percent linear warm-up steps. VisualBERT, OSCAR, and UNITER are trained for 10 epochs and ERNIE-Vil models are trained for 10000 steps. The weights with the best scores on the validation set are saved and used for inference on the test set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 58, |
|
"text": "(Lin et al., 2017", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.7" |
|
}, |
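The focal loss used for finetuning can be sketched as follows; gamma = 2 is the common default from Lin et al. (2017), and the paper does not report its exact setting.

```python
import torch
import torch.nn.functional as F

# Focal loss sketch: down-weights well-classified examples via (1 - p_t)^gamma.
def focal_loss(logits: torch.Tensor, targets: torch.Tensor, gamma: float = 2.0):
    log_probs = F.log_softmax(logits, dim=-1)
    log_pt = log_probs.gather(1, targets.unsqueeze(1)).squeeze(1)  # log p of gold class
    pt = log_pt.exp()
    return (-(1.0 - pt) ** gamma * log_pt).mean()

loss = focal_loss(torch.randn(16, 4), torch.randint(0, 4, (16,)))
```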
|
{ |
|
"text": "VisualBERT-large 47.8 UNITER-large 48.8 OSCAR-large 48.5 ERNIE-Vil-large 50.9 Continual pretrained model VisualBERT-large 48.2 UNITER-large 49.9 OSCAR-large 49.2 Ensemble 54.7 In Table 2 , we present the results of our experiments in a step by step manner. We started with finetuning base models provided by original authors. Then, VisualBERT-large, UNITER-large, and OSCAR-large models are pretrained on our dataset with MLM task and finetuned on our task. After that, ensemble learning is implemented to combine results of various models. We evaluate our models using official metrics Macro F1-score on test set. ERNIE-Vil has been the SoTA model on the multimodal task leaderboard and in this task it also achieves competitive performance at 50.9 on the test set without further continual pretraining, which outperforms all the single-stream models by over 2 in Macro F1-score. We consider that through incorporating structured knowledge obtained from scene graphs during cross-modal pretraining, ERNIE-Vil learns more knowledge which benefits the downstream task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 186, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Meanwhile, VisualBERT-large, UNITER-large, and OSCAR-large models shows improvements in performance through continual pretraining, which can be interpreted as domain adaptation on our dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ensemble learning remarkably raises our score by 3.5 than the best single model, which achieves the best score for our submission in this task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A classification report is presented in table 3, which allows us to do further assessments on our system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Our system has a relatively poor performance on the class Hero. On the one hand, we interpret it as a lack of sample of this class in the training set. It is insufficient for our model to learn the features of this class. On the other hand, through observing bad cases, we find some memes need considerable external knowledge about history and politics, which can even be challenging for human beings to comprehend and do classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In our experiment, we use an End2End solution to do roles classification, concatenating the entity with input sequence as a <entity, text, image> triplet. However, we do not directly point out the entity's corresponding region in the image. Some other researchers have discussed this problem: it is naturally weakly-supervised learning since there are no explicitly labelled alignments between regions or objects in an image and words or phrases in the text. We hypothesize that our model can not align some unusual entities correctly with its image and text. Moreover, comprehending a meme in the political domain heavily relies on knowledge, while the size of the whole dataset is relatively small, so our continual pretraining on a task-specific dataset is far from sufficient. There are two directions for further development of our system on this issue. On the one hand, more in-domain data can be incorporated to enlarge the dataset. On the other hand, knowledge-based models or external knowledge sources can be introduced to help the model understand the background and reason the relations of entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Directions", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In this paper, we have exploited a VCR approach to tackle the role labelling of entities in hateful memes, which is a novel task in multimodal understanding and reasoning. Four popular transformerbased multimodal models, VisualBERT, UNITER, OSCAR, and ERNIE-Vil are applied as base models while strategies like loss re-weighting and data augmentation are implemented during the training of models. Then, continual pretraining is taken for domain adaptation and achieves better performance. Ensemble learning of variant models achieves the impressive Macro F1-score of 0.5470 on the final (unseen) test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://codalab.lisn.upsaclay.fr/ competitions/906", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/facebookresearch/ detectron2 modalities through local alignment in the reconstruction of masked tokens/regions across modalities, powering heterogeneous downstream V+L tasks with joint multimodal embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A multimodal memes classification: A survey and open research issues", |
|
"authors": [ |
|
{ |
|
"first": "Aftab", |
|
"middle": [], |
|
"last": "Tariq Habib Afridi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Alam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jawad", |
|
"middle": [], |
|
"last": "Muhammad Numan Khan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Young-Koo", |
|
"middle": [], |
|
"last": "Khan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The Proceedings of the Third International Conference on Smart City Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1451--1466", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tariq Habib Afridi, Aftab Alam, Muhammad Numan Khan, Jawad Khan, and Young-Koo Lee. 2020. A multimodal memes classification: A survey and open research issues. In The Proceedings of the Third International Conference on Smart City Applications, pages 1451-1466. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Social degeneration through social media: A study of the adverse impact of 'memes", |
|
"authors": [ |
|
{ |
|
"first": "Prithvi", |
|
"middle": [], |
|
"last": "Bhattacharya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Sixth HCT Information Technology Trends (ITT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "44--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prithvi Bhattacharya. 2019. Social degeneration through social media: A study of the adverse im- pact of 'memes'. In 2019 Sixth HCT Information Technology Trends (ITT), pages 44-46. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Vlp: A survey on vision-language pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Feilong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duzhen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minglun", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiuyi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2202.09061" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Feilong Chen, Duzhen Zhang, Minglun Han, Xiuyi Chen, Jing Shi, Shuang Xu, and Bo Xu. 2022. Vlp: A survey on vision-language pre-training. arXiv preprint arXiv:2202.09061.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Uniter: Universal image-text representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Yen-Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Licheng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Kholy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faisal", |
|
"middle": [], |
|
"last": "Ahmed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "104--120", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. 2020. Uniter: Universal image-text representation learning. In European conference on computer vision, pages 104-120. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A survey of vision-language pre-trained models", |
|
"authors": [ |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zikang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wayne Xin", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2202.10936" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yifan Du, Zikang Liu, Junyi Li, and Wayne Xin Zhao. 2022. A survey of vision-language pre-trained mod- els. arXiv preprint arXiv:2202.10936.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. 2020. The hateful memes challenge: Detecting hate speech in multimodal memes", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Firooz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Mohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vedanuj", |
|
"middle": [], |
|
"last": "Goswami", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "2611--2624", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. 2020. The hateful memes chal- lenge: Detecting hate speech in multimodal memes. Advances in Neural Information Processing Systems, 33:2611-2624.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Visualbert: A simple and performant baseline for vision and language", |
|
"authors": [ |
|
{ |
|
"first": "Liunian Harold", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Da", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cho-Jui", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.03557" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. 2019. Visualbert: A sim- ple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Oscar: Objectsemantics aligned pre-training for vision-language tasks", |
|
"authors": [ |
|
{ |
|
"first": "Xiujun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xi", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunyuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengchuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaowei", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lijuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houdong", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "European Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "121--137", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. 2020. Oscar: Object- semantics aligned pre-training for vision-language tasks. In European Conference on Computer Vision, pages 121-137. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Focal loss for dense object detection", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Priya", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [], |
|
"last": "Girshick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE international conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2980--2988", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Doll\u00e1r. 2017. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Nltk: The natural language toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edward Loper and Steven Bird. 2002. Nltk: The natural language toolkit. arXiv preprint cs/0205028.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. Advances in neural information processing systems", |
|
"authors": [ |
|
{ |
|
"first": "Jiasen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. 2019. Vilbert: Pretraining task-agnostic visiolinguis- tic representations for vision-and-language tasks. Ad- vances in neural information processing systems, 32.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Detecting harmful memes and their targets", |
|
"authors": [ |
|
{ |
|
"first": "Shraman", |
|
"middle": [], |
|
"last": "Pramanick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Dimitrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rituparna", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shivam", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Md", |
|
"middle": [], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2110.00413" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shraman Pramanick, Dimitar Dimitrov, Rituparna Mukherjee, Shivam Sharma, Md Akhtar, Preslav Nakov, Tanmoy Chakraborty, et al. 2021. Detect- ing harmful memes and their targets. arXiv preprint arXiv:2110.00413.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Findings of the constraint 2022 shared task on detecting the hero, the villain, and the victim in memes", |
|
"authors": [ |
|
{ |
|
"first": "Shivam", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tharun", |
|
"middle": [], |
|
"last": "Suresh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atharva", |
|
"middle": [], |
|
"last": "Jitendra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Himanshi", |
|
"middle": [], |
|
"last": "Mathur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Md", |
|
"middle": [ |
|
"Shad" |
|
], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the Workshop on Combating Online Hostile Posts in Regional Languages during Emergency Situations -CONSTRAINT 2022", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shivam Sharma, Tharun Suresh, Atharva Jitendra, Hi- manshi Mathur, Preslav Nakov, Md. Shad Akhtar, and Tanmoy Chakraborty. 2022. Findings of the con- straint 2022 shared task on detecting the hero, the villain, and the victim in memes. In Proceedings of the Workshop on Combating Online Hostile Posts in Regional Languages during Emergency Situations - CONSTRAINT 2022, Collocated with ACL 2022.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Vl-bert: Pre-training of generic visual-linguistic representations", |
|
"authors": [ |
|
{ |
|
"first": "Weijie", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xizhou", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lewei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jifeng", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.08530" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. 2019. Vl-bert: Pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Lxmert: Learning cross-modality encoder representations from transformers", |
|
"authors": [ |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.07490" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hao Tan and Mohit Bansal. 2019. Lxmert: Learning cross-modality encoder representations from trans- formers. arXiv preprint arXiv:1908.07490.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, 30.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Ernie-vil: Knowledge enhanced vision-language representations through scene graph", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiji", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weichong", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Hao Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.16934" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Yu, Jiji Tang, Weichong Yin, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. 2020. Ernie-vil: Knowledge enhanced vision-language representations through scene graph. arXiv preprint arXiv:2006.16934.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "From recognition to cognition: Visual commonsense reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Zellers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE/CVF conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6720--6731", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. 2019. From recognition to cognition: Vi- sual commonsense reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pat- tern recognition, pages 6720-6731.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Devlbert: Learning deconfounded visio-linguistic representations", |
|
"authors": [ |
|
{ |
|
"first": "Shengyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tan", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Kuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhou", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianke", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jin", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongxia", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th ACM International Conference on Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4373--4382", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shengyu Zhang, Tan Jiang, Tan Wang, Kun Kuang, Zhou Zhao, Jianke Zhu, Jin Yu, Hongxia Yang, and Fei Wu. 2020. Devlbert: Learning deconfounded visio-linguistic representations. In Proceedings of the 28th ACM International Conference on Multimedia, pages 4373-4382.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Enhance multimodal transformer with external label and in-domain pretrain: Hateful meme challenge winning solution", |
|
"authors": [ |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2012.08290" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ron Zhu. 2020. Enhance multimodal transformer with external label and in-domain pretrain: Hateful meme challenge winning solution. arXiv preprint arXiv:2012.08290.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Roles distribution of most frequent entities" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "and output format for dual-stream model" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "The input and output format of our system" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"text": "Numbers of sample for each role label in Covid-19 and US Politics domain", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"num": null, |
|
"text": "Results of models in our systems", |
|
"content": "<table><tr><td>6 Results and Discussion</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"num": null, |
|
"text": "An classification report for our final submission", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |