|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:35:46.866637Z" |
|
}, |
|
"title": "WNUT-2020 Task 2: Identification of Informative COVID-19 English Tweets", |
|
"authors": [ |
|
{ |
|
"first": "Dat", |
|
"middle": [], |
|
"last": "Quoc Nguyen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VinAI Research", |
|
"location": { |
|
"country": "Vietnam" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Thanh", |
|
"middle": [], |
|
"last": "Vu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Oracle Digital Assistant", |
|
"location": { |
|
"settlement": "Oracle", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Afshin", |
|
"middle": [], |
|
"last": "Rahimi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Queensland", |
|
"location": { |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mai", |
|
"middle": [ |
|
"Hoang" |
|
], |
|
"last": "Dao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VinAI Research", |
|
"location": { |
|
"country": "Vietnam" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Linh", |
|
"middle": [ |
|
"The" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VinAI Research", |
|
"location": { |
|
"country": "Vietnam" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Doan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VinAI Research", |
|
"location": { |
|
"country": "Vietnam" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we provide an overview of the WNUT-2020 shared task on the identification of informative COVID-19 English Tweets. We describe how we construct a corpus of 10K Tweets and organize the development and evaluation phases for this task. In addition, we also present a brief summary of results obtained from the final system evaluation submissions of 55 teams, finding that (i) many systems obtain very high performance, up to 0.91 F 1 score, (ii) the majority of the submissions achieve substantially higher results than the baseline fastText (Joulin et al., 2017), and (iii) fine-tuning pre-trained language models on relevant language data followed by supervised training performs well in this task.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we provide an overview of the WNUT-2020 shared task on the identification of informative COVID-19 English Tweets. We describe how we construct a corpus of 10K Tweets and organize the development and evaluation phases for this task. In addition, we also present a brief summary of results obtained from the final system evaluation submissions of 55 teams, finding that (i) many systems obtain very high performance, up to 0.91 F 1 score, (ii) the majority of the submissions achieve substantially higher results than the baseline fastText (Joulin et al., 2017), and (iii) fine-tuning pre-trained language models on relevant language data followed by supervised training performs well in this task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "As of late-September 2020, the COVID-19 Coronavirus pandemic has led to about 1M deaths and 33M infected patients from 213 countries and territories, creating fear and panic for people all around the world. 1 Recently, much attention has been paid to building monitoring systems (e.g. The Johns Hopkins Coronavirus Dashboard) to track the development of the pandemic and to provide users the information related to the virus, 2 e.g. any new suspicious/confirmed cases near/in the users' regions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "It is worth noting that most of the \"official\" sources used in the tracking tools are not frequently kept up to date with the current pandemic situation, e.g. WHO updates the pandemic information only once a day. Those monitoring systems thus use social network data, e.g. from Twit-ter, as a real-time alternative source for updating the pandemic information, generally by crowdsourcing or searching for related information manually. However, the pandemic has been spreading rapidly; we observe a massive amount of data on social networks, e.g. about 3.5M of English Tweets posted daily on the Twitter platform (Lamsal, 2020) in which the majority are uninformative. Thus, it is important to be able to select the informative Tweets (e.g. COVID-19 Tweets related to new cases or suspicious cases) for downstream applications. However, manual approaches to identify the informative Tweets require significant human efforts, do not scale with rapid developments, and are costly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To help handle the problem, we propose a shared task which is to automatically identify whether a COVID-19 English Tweet is informative or not. Our task is defined as a binary classification problem: Given an English Tweet related to COVID-19, decide whether it should be classified as INFORMATIVE or UNINFORMATIVE. Here, informative Tweets provide information about suspected, confirmed, recovered and death cases as well as the location or travel history of the cases. The following example presents an informative Tweet: INFORMATIVE Update: Uganda Health Minister Jane Ruth Aceng has confirmed the first #coronavirus case in Uganda. The patient is a 36-yearold Ugandan male who arrived from Dubai today aboard Ethiopian Airlines. Patient travelled to Dubai 4 days ago. #Coron-avirusPandemic", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The goals of our shared task are: (i) To develop a language processing task that potentially impacts research and downstream applications, and (ii) To provide the research community with a new dataset for identifying informative COVID-19 English Tweets. To achieve the goals, we manually construct a dataset of 10K COVID-19 English Tweets with INFORMATIVE and UNIN-FORMATIVE labels. We believe that the dataset and systems developed for our task will be beneficial for the development of COVID-19 monitoring systems. All practical information, data download links and the final evaluation results can be found at the CodaLab website of our shared task: https://competitions.codalab. org/competitions/25845.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We define the guideline to annotate a COVID-19 related Tweet with the \"INFORMATIVE\" label if the Tweet mentions suspected cases, confirmed cases, recovered cases, deaths, number of tests performed as well as location or travel history associated with the confirmed/suspected cases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The WNUT-2020 Task dataset 2.1 Annotation guideline", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In addition, we also set further requirements in which the \"INFORMATIVE\" Tweet has to satisfy. In particular, the \"INFORMATIVE\" Tweet should not present a rumor or prediction. Furthermore, quantities mentioned in the Tweet have to be specific (e.g. \"two new cases\" or \"about 125 tested positives\") or could be inferred directly (e.g. \"120 coronavirus tests done so far, 40% tested positive\"), but not purely in percentages or rates (e.g. \"20%\", \"1000 per million\", or \"a third\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The WNUT-2020 Task dataset 2.1 Annotation guideline", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The COVID-19 related Tweets not satisfying the \"INFORMATIVE\" annotation guideline are annotated with the \"UNINFORMATIVE\" label. An uninformative Tweet example is as follows: UNINFORMATIVE Indonesia frees 18,000 inmates, as it records highest #coronavirus death toll in Asia behind China HTTPURL", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The WNUT-2020 Task dataset 2.1 Annotation guideline", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To be able to construct a dataset used in our shared task, we first have to crawl the COVID-19 related Tweets. We collect a general Tweet corpus related to the COVID-19 pandemic based on a predefined list of 10 keywords, including: \"coronavirus\", \"covid-19\", \"covid 19\", \"covid 2019\", \"covid19\", \"covid2019\", \"covid-2019\", \"Coron-aVirusUpdate\", \"Coronavid19\" and \"SARS-CoV-2\". We utilize the Twitter streaming API to download real-time English Tweets containing at least one keyword from the predefined list. 3 We stream the Tweet data for four months using the API from 01 st March 2020 to 30 th June 2020. We then filter out Tweets containing less than 10 words (including hashtags and user mentions) as well as Tweets from users with less than five hundred followers. This is to help reduce the rate of Tweets with fake news (our manual annotation process does not involve in verifying fake news) with a rather strong assumption that reliable information is more likely to be propagated by users with a large number of followers. 4 To handle the duplication problem: (i) we remove Retweets starting with the \"RT\" token, and (ii) in cases where two Tweets are the same after lowecasing as well as removing hashtags and user mentions, the earlier Tweet is kept and the subsequent Tweet will be filtered out as it tends to be a Retweet. Applying these filtering steps results in a final corpus of about 23M COVID-19 English Tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 509, |
|
"end": 510, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1033, |
|
"end": 1034, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "COVID-19 related Tweet collection", |
|
"sec_num": "2.2" |
|
}, |
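{

"text": "For concreteness, a minimal sketch of these filtering steps follows (not the organizers' actual code; the Tweet fields 'text', 'followers' and 'is_retweet' are assumed names):\n\nimport re\n\ndef normalize(text):\n    # Lowercase and strip hashtags/user mentions, as in the dedup rule.\n    return ' '.join(re.sub(r'[#@]\\w+', '', text.lower()).split())\n\ndef keep(tweet, seen):\n    # Apply the filters described above; 'seen' caches normalized texts.\n    if tweet['is_retweet'] or tweet['text'].startswith('RT'):\n        return False\n    if len(tweet['text'].split()) < 10:  # fewer than 10 words\n        return False\n    if tweet['followers'] < 500:  # fewer than 500 followers\n        return False\n    key = normalize(tweet['text'])\n    if key in seen:  # near-duplicate of an earlier Tweet\n        return False\n    seen.add(key)\n    return True",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "COVID-19 related Tweet collection",

"sec_num": null

},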
|
{ |
|
"text": "From the corpus of 23M Tweets, we select Tweets which are potentially informative, containing predefined strings relevant to the annotation guideline such as \"confirm\", \"positive\", \"suspected\", \"death\", \"discharge\", \"test\" and \"travel history\". We then remove similar Tweets with the tokenbased cosine similarity score (Wang et al., 2011) that is equal or greater than 0.7, resulting in a dataset of \"INFORMATIVE\" candidates. We then randomly sample 2K Tweets from this dataset for the first phase of annotation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 319, |
|
"end": 338, |
|
"text": "(Wang et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation process", |
|
"sec_num": "2.3" |
|
}, |
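{

"text": "For illustration, a simplified token-frequency cosine similarity with the 0.7 cutoff might look as follows (a sketch only; Wang et al. (2011) describe a far more efficient fuzzy-token join):\n\nimport math\nfrom collections import Counter\n\ndef token_cosine(a, b):\n    # Cosine similarity over token-frequency vectors of two Tweets.\n    va, vb = Counter(a.lower().split()), Counter(b.lower().split())\n    dot = sum(va[t] * vb[t] for t in va.keys() & vb.keys())\n    norm = math.sqrt(sum(c * c for c in va.values())) * math.sqrt(sum(c * c for c in vb.values()))\n    return dot / norm if norm else 0.0\n\ndef deduplicate(tweets, threshold=0.7):\n    # Keep a Tweet only if it stays below the cutoff against all kept Tweets.\n    kept = []\n    for t in tweets:\n        if all(token_cosine(t, k) < threshold for k in kept):\n            kept.append(t)\n    return kept",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Annotation process",

"sec_num": null

},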
|
{ |
|
"text": "Three annotators are employed to independently annotate each of the 2K Tweets with one of the two labels \"INFORMATIVE\" and \"UN-INFORMATIVE\". We use the \"docanno\" toolkit for handling the annotations (Nakayama et al., 2018) . We measure the inter-annotator agreement to assess the quality of annotations and to see whether the guideline allows to carry out the task consistently. In particular, we use the Fleiss'", |
|
"cite_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 222, |
|
"text": "(Nakayama et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation process", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Training Validation Test Total #INFOR 3,303 472 944 4,719 #UNINF 3,697 528 1,056 5,281 Total 7,000 1,000 2,000 10,000 Table 1 : Basic statistics of our dataset. #INFOR and #UNINF denote the numbers of \"INFORMATIVE\" and \"UNINFORMATIVE\" Tweets, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
|
{ |
|
"start": 129, |
|
"end": 136, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Item", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Kappa coefficient to assess the annotator agreement (Fleiss, 1971) . For this first phase, the Kappa score is 0.797 which can be interpreted as substantial (Landis and Koch, 1977) . We further run a discussion for Tweets where there is a disagreement in the assigned labels among the annotators. The discussion is to determine the final labels of the Tweets as well as to improve the quality of the annotation guideline. For the second phase, we employ the 2K annotated Tweets from the first phase to train a binary fastText classifier (Joulin et al., 2017) to classify a COVID-19 related Tweet into either \"INFORMA-TIVE\" or \"UNINFORMATIVE\". We utilize the trained classifier to predict the probability of \"IN-FORMATIVE\" for each of all remaining Tweets in the dataset of \"INFORMATIVE\" candidates from the first phase. Then we randomly sample 8K Tweets from the candidate dataset, including 3K, 2K and 3K Tweets associated with the probability \u2208 [0.0, 0.3), [0.3, 0.7) and [0.7, 1.0], respectively (here, we do not sample from the existing 2K annotated Tweets). The goal here is to select Tweets with varying degree of detection difficulty (with respect to the baseline) in both labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 66, |
|
"text": "(Fleiss, 1971)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 179, |
|
"text": "(Landis and Koch, 1977)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 557, |
|
"text": "(Joulin et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Item", |
|
"sec_num": null |
|
}, |
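{

"text": "The agreement computation can be reproduced with the statsmodels implementation of Fleiss' Kappa (a sketch with toy labels; the real input is the 2K x 3 matrix of annotations):\n\nimport numpy as np\nfrom statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa\n\n# labels[i, j]: label from annotator j for Tweet i (1 = INFORMATIVE, 0 = UNINFORMATIVE)\nlabels = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1], [0, 0, 0]])\ntable, _ = aggregate_raters(labels)  # per-Tweet counts of each category\nprint(fleiss_kappa(table))  # the paper reports 0.797 (phase 1) and 0.818 (phase 2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Annotation process",

"sec_num": null

},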
|
{ |
|
"text": "The three annotators then independently assign the \"INFORMATIVE\" or \"UNINFORMATIVE\" label to each of the 8K Tweets. The Kappa score is obtained at 0.818 which can be interpreted as almost perfect (Landis and Koch, 1977) . Similar to the first phase, for each Tweet with a disagreement among the annotators, we also run a further discussion to decide its final label annotation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 219, |
|
"text": "(Landis and Koch, 1977)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Item", |
|
"sec_num": null |
|
}, |
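{

"text": "The probability-based sampling described above could be sketched as follows (assuming the official fasttext Python package and a training file 'phase1_train.txt' in fastText's '__label__<LABEL> <text>' line format; all names are illustrative):\n\nimport fasttext\n\nmodel = fasttext.train_supervised(input='phase1_train.txt')  # binary fastText classifier\n\ndef p_informative(text):\n    # Predicted probability of the INFORMATIVE label for one Tweet.\n    labels, probs = model.predict(text, k=2)\n    return dict(zip(labels, probs)).get('__label__INFORMATIVE', 0.0)\n\nremaining_candidates = ['placeholder candidate tweet one', 'placeholder candidate tweet two']\nbuckets = {'low': [], 'mid': [], 'high': []}\nfor tweet in remaining_candidates:\n    p = p_informative(tweet)\n    buckets['low' if p < 0.3 else 'mid' if p < 0.7 else 'high'].append(tweet)\n# Then sample 3K, 2K and 3K Tweets from 'low', 'mid' and 'high', respectively.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Annotation process",

"sec_num": null

},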
|
{ |
|
"text": "We merge the two datasets from the first and second phases to formulate the final gold standard corpus of 10K annotated Tweets, consisting of 4,719 \"INFORMATIVE\" Tweets and 5,281 \"UN-INFORMATIVE\" Tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Item", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To split the gold standard corpus into training, validation and test sets, we first categorize its Tweets into two categories of \"easy\" and \"not-easy\", in which the \"not-easy\" category contains Tweets with a label disagreement among annotators before participating in the annotation discussions. We then randomly select 7K Tweets for training, 1K Tweets for validation and 2K Tweets for test with a constraint that ensures the number of the \"not-easy\" Tweets in the training is equal to that in the validation and test sets. Table 1 describes the basic statistics of our corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 525, |
|
"end": 532, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data partitions", |
|
"sec_num": "2.4" |
|
}, |
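{

"text": "One way to implement such a constrained split is to stratify on the easy/not-easy flag so that each split receives the same share of hard cases (a sketch under that interpretation; the organizers' exact procedure may differ):\n\nfrom sklearn.model_selection import train_test_split\n\n# corpus: list of (text, label, is_not_easy) triples; placeholder data shown.\ncorpus = [('tweet text %d' % i, 'INFORMATIVE', i % 4 == 0) for i in range(10000)]\nflags = [t[2] for t in corpus]\ntrain, rest = train_test_split(corpus, train_size=7000, stratify=flags, random_state=0)\nrest_flags = [t[2] for t in rest]\nvalid, test = train_test_split(rest, train_size=1000, stratify=rest_flags, random_state=0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data partitions",

"sec_num": null

},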
|
{ |
|
"text": "Development phase: Both the training and validation sets with gold labels are released publicly to all participants for system development. Although we provide a default training and validation split of the released data, participants are free to use this data in any way they find useful when training and tuning their systems, e.g. using a different split or performing cross-validation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task organization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Evaluation phase: The raw test set is released when the final phase of system evaluation starts. To keep fairness among participants, the raw test set is a relatively large set of 12K Tweets, and the actual 2K test Tweets by which the participants' system outputs are evaluated are hidden in this large test set. We allow each participant to upload at most 2 submissions during this final evaluation phase, in which the submission obtaining higher F 1 score is ranked higher in the leaderboard.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task organization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Metrics: Systems are evaluated using standard evaluation metrics, including Accuracy, Precision, Recall and F 1 score. Note that the latter three metrics of Precision, Recall and F 1 will be calculated for the \"INFORMATIVE\" label only. The system evaluation submissions are ranked by the F 1 score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task organization", |
|
"sec_num": "3" |
|
}, |
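{

"text": "With scikit-learn, the ranking metric corresponds to the following (a sketch; y_true and y_pred are placeholder arrays with 1 = INFORMATIVE, 0 = UNINFORMATIVE):\n\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\n\ny_true = [1, 0, 1, 1, 0]\ny_pred = [1, 0, 0, 1, 0]\nprecision, recall, f1, _ = precision_recall_fscore_support(\n    y_true, y_pred, pos_label=1, average='binary')  # \"INFORMATIVE\" label only\naccuracy = accuracy_score(y_true, y_pred)\nprint(precision, recall, f1, accuracy)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Task organization",

"sec_num": null

},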
|
{ |
|
"text": "Baseline: fastText (Joulin et al., 2017) is used as our baseline, employing the default data split.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 40, |
|
"text": "(Joulin et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task organization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In total, 121 teams spreading across 20 different countries registered to participate in our WNUT-2020 Task 2 during the system development phase. Of those 121 teams, 55 teams uploaded their submissions for the final evaluation phase. 5 We report results obtained for each team in Table 2. The baseline fastText achieves 0.7503 in F 1 score. In particular, 48 teams outperform the baseline in terms of F 1 . There are 39 teams with an F 1 greater than 0.80, in which 10 teams are with an F 1 greater than 0.90. Both NutCracker (Kumar and Singh, 2020) and NLP North (M\u00f8ller et al., 2020) obtain the highest F 1 score at 0.9096, in which NutCracker obtains the highest Accuracy at 91.50% that is 0.1% absolute higher than NLP North's.", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 236, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 527, |
|
"end": 550, |
|
"text": "(Kumar and Singh, 2020)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 586, |
|
"text": "(M\u00f8ller et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Of the 55 teams, 36 teams submitted their system paper, in which 34 teams' papers are finally included in the Proceedings. All of the 36 teams with paper submissions employ pre-trained language models to extract latent features for learning classifiers. The majority of pre-trained language models employed include BERT (Devlin et al., 2019) , XLNet (Yang et al., 2019) , RoBERTa (Liu et al., 2019 ), BERTweet (Nguyen et al., 2020 and especially CT-BERT (M\u00fcller et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 341, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 350, |
|
"end": 369, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 397, |
|
"text": "(Liu et al., 2019", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 430, |
|
"text": "), BERTweet (Nguyen et al., 2020", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 454, |
|
"end": 475, |
|
"text": "(M\u00fcller et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Not surprisingly, CT-BERT, resulted in by continuing pre-training from the pre-trained BERTlarge model on a corpus of 22.5M COVID-19 related Tweets, is utilized in a large number of the highly-ranked systems. In particular, all of top 6 teams including NutCracker, NLP North, UIT-HSE (Tran et al., 2020) , #GCDH (Varachkina et al., 2020) , Loner and Phonemer (Wadhawan, 2020) utilize CT-BERT. That is why we find slight differences in their obtained F 1 scores. In addition, ensemble techniques are also used in a large proportion (61%) of the participating teams. Specifically, to obtain the best performance, the top 10 teams, except NLP North, #GCDH and Loner, all employ ensemble techniques.", |
|
"cite_spans": [ |
|
{ |
|
"start": 284, |
|
"end": 303, |
|
"text": "(Tran et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 337, |
|
"text": "(Varachkina et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 375, |
|
"text": "(Wadhawan, 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this paper, we have presented an overview of the WNUT-2020 Task 2 \"Identification of Informative COVID-19 English Tweets\": (i) Provide details of the task, data preparation process, and the task organization, and (ii) Report the results obtained by participating teams and outline their commonly adopted approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We receive registrations from 121 teams and final system evaluation submissions from 55 teams, in which 34/55 teams contribute detailed system descriptions. The evaluation results show that many systems obtain a very high performance of up to 0.91 F 1 score on the task, using pre-trained language models which are fine-tuned on unlabelled COVID-19 related Tweets (CT-BERT) and are subsequently trained on this task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://developer.twitter.com/ en/docs/twitter-api/v1/tweets/ filter-realtime/overview 4 We acknowledge that there are accounts with a large number of followers, who participate in publication and propagation of misinformation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "CXP949 is not shown on our CodaLab leaderboard because this team unfortunately makes an incorrectly-formatted submission file name, resulting in a fail for our CodaLab automatic evaluation program. We manually re-evaluate their submission and include its obtained results inTable 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Table 2: Final results on the test set. P, R and Acc. denote the Precision, Recall and Accuracy, respectively. Teams are ranked by their highest F 1 score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Measuring nominal scale agreement among many raters", |
|
"authors": [ |
|
{

"first": "Joseph",

"middle": [

"L"

],

"last": "Fleiss",

"suffix": ""

}
|
], |
|
"year": 1971, |
|
"venue": "Psychological bulletin", |
|
"volume": "76", |
|
"issue": "5", |
|
"pages": "378--382", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph L Fleiss. 1971. Measuring nominal scale agree- ment among many raters. Psychological bulletin, 76(5):378-382.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bag of Tricks for Efficient Text Classification", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "427--431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of Tricks for Efficient Text Classification. In Proceedings of the 15th Con- ference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Pa- pers, pages 427-431.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "NutCracker at WNUT-2020 Task 2: Robustly Identifying Informative COVID-19 Tweets using Ensembling and Adversarial Training", |
|
"authors": [ |
|
{ |
|
"first": "Priyanshu", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aadarsh", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 6th Workshop on Noisy User-generated Text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Priyanshu Kumar and Aadarsh Singh. 2020. NutCracker at WNUT-2020 Task 2: Robustly Identifying Informative COVID-19 Tweets using Ensembling and Adversarial Training . In Proceed- ings of the 6th Workshop on Noisy User-generated Text.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The measurement of observer agreement for categorical data", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Landis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gary G", |
|
"middle": [], |
|
"last": "Koch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Biometrics", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "159--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J Richard Landis and Gary G Koch. 1977. The mea- surement of observer agreement for categorical data. Biometrics, 33(1):159-174.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretrain- ing Approach. arXiv preprint, arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Rob van der Goot, and Barbara Plank. 2020. NLP North", |
|
"authors": [ |
|
{ |
|
"first": "Anders", |
|
"middle": [ |
|
"Giovanni" |
|
], |
|
"last": "M\u00f8ller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anders Giovanni M\u00f8ller, Rob van der Goot, and Bar- bara Plank. 2020. NLP North at WNUT-2020", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Pre-training versus Ensembling for Detection of Informative COVID-19 English Tweets", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the 6th Workshop on Noisy Usergenerated Text", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Task 2: Pre-training versus Ensembling for Detec- tion of Informative COVID-19 English Tweets. In Proceedings of the 6th Workshop on Noisy User- generated Text.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "COVID-Twitter-BERT: A Natural Language Processing Model to Analyse COVID-19 Content on Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcel", |
|
"middle": [], |
|
"last": "Salath\u00e9", |
|
"suffix": "" |
|
}, |
|
{

"first": "Per",

"middle": [

"E"

],

"last": "Kummervold",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.07503" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin M\u00fcller, Marcel Salath\u00e9, and Per E Kum- mervold. 2020. COVID-Twitter-BERT: A Nat- ural Language Processing Model to Analyse COVID-19 Content on Twitter. arXiv preprint arXiv:2005.07503.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Text Annotation Tool for Human. Software avail", |
|
"authors": [ |
|
{ |
|
"first": "Hiroki", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takahiro", |
|
"middle": [], |
|
"last": "Kubo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junya", |
|
"middle": [], |
|
"last": "Kamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yasufumi", |
|
"middle": [], |
|
"last": "Taniguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroki Nakayama, Takahiro Kubo, Junya Kamura, Ya- sufumi Taniguchi, and Xu Liang. 2018. doccano: Text Annotation Tool for Human. Software avail- able from https://github.com/doccano/doccano.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "BERTweet: A pre-trained language model for English Tweets", |
|
"authors": [ |
|
{

"first": "Dat",

"middle": [

"Quoc"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Thanh",

"middle": [],

"last": "Vu",

"suffix": ""

},

{

"first": "Anh",

"middle": [

"Tuan"

],

"last": "Nguyen",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen, Thanh Vu, and Anh Tuan Nguyen. 2020. BERTweet: A pre-trained language model for English Tweets. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing: System Demonstrations.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "UIT-HSE at WNUT-2020 Task 2: Exploiting CT-BERT for Identifying COVID-19 Information on the Twitter Social Network", |
|
"authors": [ |
|
{ |
|
"first": "Khiem", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Phan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiet", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngan Luu Thuy", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 6th Workshop on Noisy User-generated Text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Khiem Tran, Hao Phan, Kiet Nguyen, and Ngan Luu Thuy Nguyen. 2020. UIT-HSE at WNUT- 2020 Task 2: Exploiting CT-BERT for Identifying COVID-19 Information on the Twitter Social Net- work. In Proceedings of the 6th Workshop on Noisy User-generated Text.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "#GCDH at WNUT-2020 Task 2: BERT-Based Models for the Detection of Informativeness in English COVID-19 Related Tweets", |
|
"authors": [ |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Varachkina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Ziehe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tillmann", |
|
"middle": [], |
|
"last": "Do\u0144icke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franziska", |
|
"middle": [], |
|
"last": "Pannach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 6th Workshop on Noisy User-generated Text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hanna Varachkina, Stefan Ziehe, Tillmann Do\u0144icke, and Franziska Pannach. 2020. #GCDH at WNUT- 2020 Task 2: BERT-Based Models for the Detec- tion of Informativeness in English COVID-19 Re- lated Tweets. In Proceedings of the 6th Workshop on Noisy User-generated Text.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Phonemer at WNUT-2020 Task 2: Sequence Classification Using COVID Twitter BERT and Bagging Ensemble Technique based on Plurality Voting", |
|
"authors": [ |
|
{ |
|
"first": "Anshul", |
|
"middle": [], |
|
"last": "Wadhawan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 6th Workshop on Noisy User-generated Text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anshul Wadhawan. 2020. Phonemer at WNUT-2020 Task 2: Sequence Classification Using COVID Twit- ter BERT and Bagging Ensemble Technique based on Plurality Voting. In Proceedings of the 6th Work- shop on Noisy User-generated Text.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Fast-join: An efficient method for fuzzy token matching based string similarity join", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Fe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 27th IEEE International Conference on Data Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "458--469", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Wang, G. Li, and J. Fe. 2011. Fast-join: An effi- cient method for fuzzy token matching based string similarity join. In Proceedings of the 27th IEEE In- ternational Conference on Data Engineering, pages 458-469.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{

"first": "Russ",

"middle": [

"R"

],

"last": "Salakhutdinov",

"suffix": ""

},

{

"first": "Quoc",

"middle": [

"V"

],

"last": "Le",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized Autoregressive Pretraining for Language Understanding. In Advances in Neural In- formation Processing Systems 32, pages 5753-5763.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": {} |
|
} |
|
} |