|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:48:51.561481Z" |
|
}, |
|
"title": "CTLR@WiC-TSV: Target Sense Verification using Marked Inputs and Pre-trained Models", |
|
"authors": [ |
|
{ |
|
"first": "Jose", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Moreno", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "UMR 5505 CNRS F-31000", |
|
"institution": "University of Toulouse IRIT", |
|
"location": { |
|
"settlement": "Toulouse", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Elvys", |
|
"middle": [ |
|
"Linhares" |
|
], |
|
"last": "Pontes", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University", |
|
"location": { |
|
"addrLine": "of La Rochelle L3i F-17000, La Rochelle", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ga\u00ebl", |
|
"middle": [], |
|
"last": "Dias", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "UMR 6072 CNRS F-14000", |
|
"institution": "University of Caen GREYC", |
|
"location": { |
|
"settlement": "Caen", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the CTRL participation in the Target Sense Verification of the Words in Context challenge (WiC-TSV) at SemDeep-6. Our strategy is based on a simplistic annotation scheme of the target words to later be classified by well-known pre-trained neural models. In particular, the marker allows to include position information to help models to correctly identify the word to disambiguate. Results on the challenge show that our strategy outperforms other participants (+11, 4 Accuracy points) and strong baselines (+1, 7 Accuracy points).", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the CTRL participation in the Target Sense Verification of the Words in Context challenge (WiC-TSV) at SemDeep-6. Our strategy is based on a simplistic annotation scheme of the target words to later be classified by well-known pre-trained neural models. In particular, the marker allows to include position information to help models to correctly identify the word to disambiguate. Results on the challenge show that our strategy outperforms other participants (+11, 4 Accuracy points) and strong baselines (+1, 7 Accuracy points).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "This paper describes the CTLR 1 participation at the Word in Context challenge on the Target Sense Verification (WiC-TSV) task at SemDeep-6. In this challenge, given a target word w within its context participants are asked to solve a binary task organised in three sub-tasks:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Sub-task 1 consists in predicting if the target word matches with a given definition,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Sub-task 2 consists in predicting if the target word matches with a given set of hypernyms, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Sub-task 3 consists in predicting if the target word matches with a given couple definition and set of hypernyms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our system is based on a masked neural language model with position information for Word Sense Disambiguation (WSD). Neural language models are recent and powerful resources useful for multiple Natural Language Processing (NLP) tasks (Devlin et al., 2018) . However, little effort has been made to perform tasks, where positions represent meaningful information. Regarding this line of research, Baldini Soares et al. (2019) include markers into the learning inputs for the task of relation classification and Boualili et al. (2020) into an information retrieval model. In both cases, the tokens allow the model to carefully identify the targets and to make an informed prediction. Besides these works, we are not aware of any other text-based tasks that have been tackled with this kind of information included into the models. To cover this gap, we propose to use markers to deal with target sense verification task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 227, |
|
"text": "(NLP)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 255, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 510, |
|
"end": 532, |
|
"text": "Boualili et al. (2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remainder of this paper presents a brief background knowledge in Section 2. Details of our strategy, including input modification and prediction mixing is presented in Section 3. Then, unofficial and official results are presented in Section 4. Finally, conclusions are drawn in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "NLP research has recently been boosted by new ways to use neural networks. Two main groups of neural networks can be distinguished 2 on NLP based on the training model and feature modification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 First, classical neural networks usually use pre-trained embeddings as input and models learn their own weights during training time. Those weights are calculated directly on the target task and integration of new features or resources is intuitive. As an example, please refer to the Figure 1 the positional features (PF in the figure) that enrich the word embeddings (WF in the figure) to better represent the target words in the sentence. In this first group, models tend to use few parameters because embeddings are not fine-tuned. This characteristic does not dramatically impact the model performances.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 287, |
|
"end": 295, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 The second group of models deals with neural language models 3 such as BERT (Devlin et al., 2018) . The main difference, w.r.t. the first group, is that the weights of the models are not calculated during the training step of the target task. Instead, they are pre-calculated in an elegant but expensive fashion by using generic tasks that deal with strong initialised models. Then, these models are fine-tuned to adapt their weights to the target task. 4 (Peters et al., 2019) or SenseBERT (Levine et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 99, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 457, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 479, |
|
"text": "(Peters et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 514, |
|
"text": "(Levine et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We propose an alternative to mix the best of both worlds by including extra tokens into the input in order to improve prediction without re-training it. To do so, we base our strategy on the introduction of signals to the neural language models as depicted in Figure 1 In this work, we mark the target word by modifying the sentence in order to improve performance of BERT for the task of target sense verification.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 260, |
|
"end": 268, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given a first sentence with a known target word, a second sentence with a definition, and a set of hypernyms, the target sense verification task consists in defining whether or not the target word in the first sentence corresponds to the definition or/and the set of hypernyms. Note that two sub-problems may be set if only the second sentence or the hypernyms are used. These sub-problems are presented as sub-tasks in the WiC-TSV challenge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem definition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We implemented a target sense verification system as a simplified version 5 of the architecture proposed by Baldini Soares et al. 2019, namely BERT EM . It is based on BERT (Devlin et al., 2018) , where an extra layer is added to make the classification of the sentence representation, i.e. classification is performed using as input the [CLS] token. As reported by Baldini Soares et al. 2019, an important component is the use of mark symbols to identify the entities to classify. In our case, we mark the target word in its context to let the system know where to focus on.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 194, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 343, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CTLR method", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Learning the similarities between a couple of sentences (sub-task 1) can easily be addressed with BERT-based models by concatenating the two inputs one after the other one as presented in Equation 1, where S 1 and S 2 are two sentences given as inputs, t 1 i (i = 1..n) are the tokens in S 1 , and t 2 j (j = 1..m) are the tokens in S 2 . In this case, the model must learn to discriminate the correct definition and also to which of the words in S 1 the definition relates to.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "input(S 1 , S 2 ) = [CLS] t 1 1 t 1 2 ... t 1 n [SEP] t 2 1 t 2 2 ... t 2 m", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
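
{

"text": "As an illustration only (this snippet is ours, not part of the submitted system), the Equation 1 input can be obtained with a recent version of the huggingface tokenizer by passing the two sentences as a pair; note that BERT's pair encoding adds a trailing [SEP] that Equation 1 omits:\n\n# Sketch: building the Equation 1 sentence-pair input with the BERT tokenizer.\nfrom transformers import BertTokenizer\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\ncontext = 'He sat on the bank of the river.'\ndefinition = 'sloping land beside a body of water.'\n\nencoded = tokenizer(context, definition)\nprint(tokenizer.convert_ids_to_tokens(encoded['input_ids']))\n# ['[CLS]', 'he', 'sat', ..., '[SEP]', 'sloping', ..., '[SEP]']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pointing-out the target words",

"sec_num": "3.3"

},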
|
{ |
|
"text": "To avoid the extra effort by the model to evidence the target word, we propose to introduce this information into the learning input. Thus, we mark the target word in S t by using a special token before and after the target word 6 . The input used when two sentences are compared is presented in Equation 2. S t is the first sentence with the target word t i , S d is the definition sentence, and t k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "x are their respective tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "input sp1 (S t , S d ) = [CLS] t t 1 t t 2 ... $ t t i $ ... t t n [SEP] t d 1 t d 2 ... t d m", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In the case of hypernyms (sub-task 2), the input on the left side is kept as in Equation 2, but the right side includes the tagging of each hypernym as presented in Equation 3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "input sp2 (S t , S h ) = [CLS] t t 1 t t 2 ... $ t t i $ ... t t n [SEP] s h 1 $ s h 2 $ ... $ s h l", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
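
{

"text": "As a minimal sketch of Equations 2 and 3 (the helper names and the example sentence are ours, not from the data set), the marked left side and the two possible right sides can be built with plain string operations before tokenisation; the '$' marker is the one described above:\n\n# Sketch: building the marked inputs of Equations 2 and 3.\ndef mark_target(context_tokens, target_index, marker='$'):\n    # Surround the target word with the marker token ('$' in our runs).\n    tokens = list(context_tokens)\n    tokens[target_index] = f'{marker} {tokens[target_index]} {marker}'\n    return ' '.join(tokens)\n\ndef hypernym_side(hypernyms, marker='$'):\n    # Hypernyms are joined with the marker, as in Equation 3.\n    return f' {marker} '.join(hypernyms)\n\nsentence = 'He sat on the bank of the river'.split()\ns_t = mark_target(sentence, target_index=4)        # left side of Eq. 2 and 3\ns_d = 'sloping land beside a body of water'        # right side of Eq. 2\ns_h = hypernym_side(['slope', 'incline', 'land'])  # right side of Eq. 3\nprint(s_t)  # He sat on the $ bank $ of the river",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pointing-out the target words",

"sec_num": "3.3"

},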
|
{ |
|
"text": "3.4 Verifying the senses", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We trained two separated models, one for each subproblem using the architecture defined in Section 3.2. The output predictions of both models are 6 We used '$' but any other special token may be used.", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 147, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "used to solve the two-tasks problem. So, our overall prediction for the main problem is calculated by combining both prediction scores. First, we normalise the scores by applying a sof tmax function to each model output, and then we select the prediction with the maximum probability as shown in Equation 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "pred(x) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 1, if m sp1 1 (x) + m sp2 1 (x) > m sp1 0 (x) + m sp2 0 (x). 0, otherwise. (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "m spk i = exp(p spk i ) j={0,1} exp(p spk j )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "and p spk i is the prediction value for the model k for the class i (m spk i ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointing-out the target words", |
|
"sec_num": "3.3" |
|
}, |
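
{

"text": "A small numerical sketch of Equations 4 and 5 (variable names are ours): each model outputs two values that are softmax-normalised, and class 1 ('T') is predicted when its summed probability is the larger one:\n\n# Sketch of the score combination of Equations 4 and 5.\nimport math\n\ndef softmax(logits):\n    # Equation 5: normalise the two output neurons of one model.\n    exps = [math.exp(v) for v in logits]\n    total = sum(exps)\n    return [v / total for v in exps]\n\ndef combined_prediction(logits_sp1, logits_sp2):\n    # Equation 4: class 1 wins if its summed probability is larger.\n    m_sp1 = softmax(logits_sp1)\n    m_sp2 = softmax(logits_sp2)\n    return 1 if m_sp1[1] + m_sp2[1] > m_sp1[0] + m_sp2[0] else 0\n\n# The definition model is confident, the hypernym model mildly disagrees.\nprint(combined_prediction([-0.2, 2.1], [0.4, 0.1]))  # -> 1",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pointing-out the target words",

"sec_num": "3.3"

},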
|
{ |
|
"text": "The data set was manually created by the task organisers and some basic statistics are presented in Table 1 . Detailed information can be found in the task description paper (Breit et al., 2020) . No extra-annotated data was used for training. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 194, |
|
"text": "(Breit et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 107, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Sets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We implemented BERT EM of Baldini Soares et al. (2019) using the huggingface library (Wolf et al., 2019) , and trained two models with each training set. We selected the model with best performance on the development set. Parameters were fixed as follows: 20 was used as maximum epochs, Cross Entropy as loss function, Adam as optimiser, bertbase-uncased 7 as pre-trained model, and other parameters were assigned following the library recommendations (Wolf et al., 2019) . The final layer is composed of two neurons (negative or positive). 1 (resp. 2, 3, and 4) includes all sentences for which the target word appears in the first (resp. second, third, and fourth) quarter of the sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 104, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 471, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 541, |
|
"end": 562, |
|
"text": "1 (resp. 2, 3, and 4)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Implementation details", |
|
"sec_num": "4.2" |
|
}, |
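
{

"text": "A condensed sketch of this setup is given below; it assumes the standard two-class sequence classification head of the library and a recent version of its API, and the learning rate is our own assumption since it is not fixed above:\n\n# Sketch of the described training setup (bert-base-uncased, two-class head,\n# Adam optimiser, Cross Entropy loss); the learning rate is an assumption.\nimport torch\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\noptimizer = torch.optim.Adam(model.parameters(), lr=2e-5)\nloss_fn = torch.nn.CrossEntropyLoss()\n\ndef training_step(marked_sentence, right_side, label):\n    # right_side is the definition (sub-task 1) or the hypernym string (sub-task 2).\n    batch = tokenizer(marked_sentence, right_side, return_tensors='pt', truncation=True)\n    logits = model(**batch).logits\n    loss = loss_fn(logits, torch.tensor([label]))\n    loss.backward()\n    optimizer.step()\n    optimizer.zero_grad()\n    return loss.item()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implementation details",

"sec_num": "4.2"

},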
|
{ |
|
"text": "As the test labels are not publicly available, our following analysis is performed exclusively on the development set. Results on the test set were calculated by the task organisers. We analyse confusion matrices depending on the position of the target word in the sentence as our strategy is based on marking the target word. These matrices are presented in Figure 2 . The confusion matrix labelled as position group 1 shows our results when the target word is in the first 25% positions of the S t sentence. Other matrices show the results of the remaining parts of the sentence (second, third, and fourth 25%, for respectively group 2, 3, and 4).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 359, |
|
"end": 367, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
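
{

"text": "The grouping can be reproduced with a short script (ours, using scikit-learn) that buckets each development example by the relative position of its target word and computes one confusion matrix per bucket:\n\n# Sketch: one confusion matrix per position group (quartile of the sentence).\nfrom collections import defaultdict\nfrom sklearn.metrics import confusion_matrix\n\ndef position_group(target_index, sentence_length, n_groups=4):\n    # 0 -> first quarter of the sentence, 3 -> last quarter.\n    return min(int(n_groups * target_index / sentence_length), n_groups - 1)\n\ndef grouped_confusion(examples):\n    # examples: iterable of (target_index, sentence_length, gold, predicted).\n    buckets = defaultdict(lambda: ([], []))\n    for idx, length, gold, pred in examples:\n        golds, preds = buckets[position_group(idx, length)]\n        golds.append(gold)\n        preds.append(pred)\n    return {g: confusion_matrix(golds, preds, labels=[0, 1])\n            for g, (golds, preds) in buckets.items()}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "4.3"

},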
|
{ |
|
"text": "Confusion matrices show that the easiest cases are when the target word is located in the first 25%. Other parts are harder mainly because the system considers positive examples as negatives (high false negative rate). However, the system behaves correctly for negative examples independently of the position of the target word. To better understand this wrong classification of the positive examples, we calculated the true label distribution depending on the normalised prediction score as in Figure 3 . Note that positive examples are mainly located on the right side but a bulk of them are located around the middle of the figure. It means that models m sp1 and m sp2 where in conflict and average results were slightly better for the negative class. In the development set, it seems important to correctly define a threshold strategy to better define which examples are marked as positive.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 504, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In our experiments, we implicitly used 0.5 as threshold 8 to define either the example belongs to the 'T' or 'F' class. When comparing Figures 3 and 4 , we can clearly see that small changes in the threshold parameter would affect our results with 8 Because of the condition m sp1 a larger impact in recall than in precision. This is mainly given to the fact that our two models contradict for some examples. We also considered the class distribution depending on a normalised distance between the target token and the beginning of the sentence. From Figure 5 , we observe that both classes are less frequent at the beginning of the sentence with negative examples slightly less frequent than positive ones. It is interesting to remark that negative examples uniformly distribute after the first bin. On the contrary, the positive examples have a more unpredictable distribution indicating that a strategy based on only positions may fail. However, our strategy that combines markers to indicate the target word and a ,1 75,8 70,7 73,2 87,5 82,4 90,3 86,2 85,9 86,7 85,8 86,3 83,3 78,4 88,5 83,1 szte begab 66,9 61,6 92,5 73,9 70,2 66,5 89,6 76,4 55,1 48,9 96,8 65,0 65,4 60,5 95,3 74,0 70,2 61,3 97,4 75,2 szte2 begab 66,3 61,1 92,8 73,7 69,9 66,2 90,2 76,3 53,7 48,1 96,8 64,3 64,4 59,8 95,3 73,5 69,6 60,8 97,4 74,9 BERT -76,6 74,1 82,8 78,2 73,5 76,1 74,2 75,1 79,2 67,8 98,2 80,2 79,8 75,8 89,6 82,1 82,1 73,0 97,9 83,6 FastText -53,4 52,8 79,4 63,4 57,1 58,0 74,0 65,0 43,1 43,1 100,0 60,2 51,1 51,5 90,3 65,6 54,0 50,5 67,1 57,3 Baseline (true) 50,8 50,8 100,0 67,3 53,8 53,8 100,0 70,0 43,1 43,1 100,0 60,2 51,7 51,7 100,0 68,2 46,4 46, Finally, the main results calculated by the organisers are presented in Table 2 . The global column presents the results for the global task, including definitions and hypernyms. Our submission is identified as run2-CTLR. In the global results, our strategy outperforms participants and baselines in terms of Accuracy, Precision, and F1. Best Recall performance is unsurprisingly obtained by the baseline (true) that corresponds to a system that predicts all examples as positives. Two strong baselines are included, FastText and BERT. Both baselines were calculated by the organisers with more details in (Breit et al., 2020) . It is interesting to remark that the baseline BERT is very similar to our model but without the marked information. However, our model focuses more on improving Precision than Recall resulting with a clear improvement in terms of Accuracy but less important in terms of F1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2349, |
|
"end": 2369, |
|
"text": "(Breit et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 151, |
|
"text": "Figures 3 and 4", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 560, |
|
"text": "From Figure 5", |
|
"ref_id": "FIGREF5" |
|
}, |
|
|
{ |
|
"start": 1815, |
|
"end": 1822, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "1 (x) + m sp2 1 (x) > m sp1 0 (x) + m sp2 0 (x).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
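
{

"text": "The threshold analysis can be reproduced with a simple sweep (our sketch, using scikit-learn) over the averaged positive probability of the two models; a threshold of 0.5 recovers the decision rule of Equation 4:\n\n# Sketch: precision/recall for different thresholds on the averaged positive\n# probability of the two models (0.5 reproduces Equation 4).\nfrom sklearn.metrics import precision_score, recall_score\n\ndef sweep(pos_probs_sp1, pos_probs_sp2, gold, thresholds=None):\n    thresholds = thresholds or [t / 20 for t in range(1, 20)]\n    scores = [(a + b) / 2 for a, b in zip(pos_probs_sp1, pos_probs_sp2)]\n    curve = []\n    for t in thresholds:\n        pred = [1 if s > t else 0 for s in scores]\n        curve.append((t, precision_score(gold, pred, zero_division=0),\n                      recall_score(gold, pred, zero_division=0)))\n    return curve",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "4.3"

},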
|
{ |
|
"text": "Organisers also provide results grouped by different types of examples. They included four types with three of them from domains that were not included in the training set 9 . From Table 2 , we can also conclude that our system is able to adapt 9 More details in (Breit et al., 2020) .", |
|
"cite_spans": [ |
|
|
{ |
|
"start": 263, |
|
"end": 283, |
|
"text": "(Breit et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 188, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "to out-of-domain topics as it is clearly shown for the Cocktails type in terms of F1, and also for the Medical entities type to a less extent. However, our system fails to provide better results than the standard BERT in terms of F1 for the Computer Science type. But, in terms of Accuracy, our strategy outperforms for a large margin the out-of-domain types (8.3, 6.1, and 1.2 improvements in absolute points for Cocktails, Medical entities, and Computer Science respectively). Surprisingly, it fails on both, F1 and Accuracy, for WordNet/Wiktionary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "This paper describes our participation in the WiC-TSV task. We proposed a simple but effective strategy for target sense verification. Our system is based on BERT and introduces markers around the target words to better drive the learned model. Our results are strong over an unseen collection used to verify senses. Indeed, our method (Acc=78, 3) outperforms other participants (second best participant, Acc=66, 9) and strong baselines (BERT, Acc=76, 6) when compared in terms of Accuracy, the official metric. This margin is even larger when the results are compared for the out-of-domain examples of the test collection. Thus, the results suggest that the extra information provided to the BERT model through the markers clearly boost performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As future work, we plan to complete the evaluation of our system with the WiC dataset (Pilehvar and Camacho-Collados, 2019) as well as the integration of the model into a recent multi-lingual entity linking system (Linhares Pontes et al., 2020) by marking the anchor texts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 244, |
|
"text": "(Linhares Pontes et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "University of Caen Normandie, University of Toulouse, and University of La Rochelle team.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We are aware that our classification is arguable. Although this is not an established classification in the field, it seems important for us to make a difference between them as this work tries to introduce well-established concepts from the first group into the second one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Some subcategories may exist.4 We can image a combination of both, but models that use BERT as embeddings and do not fine-tune BERT weights may be classified in the first group.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used the EntityMarkers[CLS] version.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/google-research/bert", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work has been partly supported by the European Union's Horizon 2020 research and innovation programme under grant 825153 (EMBED-DIA).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Matching the Blanks: Distributional Similarity for Relation Learning", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Livio Baldini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Soares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Fitzgerald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2895--2905", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Livio Baldini Soares, Nicholas FitzGerald, Jeffrey Ling, and Tom Kwiatkowski. 2019. Matching the Blanks: Distributional Similarity for Relation Learn- ing. In ACL. 2895-2905.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "MarkedBERT: Integrating Traditional IR Cues in Pre-Trained Language Models for Passage Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Lila", |
|
"middle": [], |
|
"last": "Boualili", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jose", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Moreno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohand", |
|
"middle": [], |
|
"last": "Boughanem", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '20)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1977--1980", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lila Boualili, Jose G. Moreno, and Mohand Boughanem. 2020. MarkedBERT: Integrating Traditional IR Cues in Pre-Trained Language Models for Passage Retrieval. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '20). Association for Computing Machinery, 1977-1980.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Mohammad Taher Pilehvar, and Jose Camacho-Collados. 2020. WiC-TSV: An Evaluation Benchmark for Target Sense Verification of Words in Context", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Breit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Artem", |
|
"middle": [], |
|
"last": "Revenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiamehr", |
|
"middle": [], |
|
"last": "Rezaee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.15016" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Breit, Artem Revenko, Kiamehr Rezaee, Moham- mad Taher Pilehvar, and Jose Camacho-Collados. 2020. WiC-TSV: An Evaluation Benchmark for Target Sense Verification of Words in Context. arXiv:2004.15016 (2020).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. arXiv:1810.04805 (2018).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Sensebert: Driving some Sense into Bert", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Levine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barak", |
|
"middle": [], |
|
"last": "Lenz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Or", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Padnos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Or", |
|
"middle": [], |
|
"last": "Sharir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shai", |
|
"middle": [], |
|
"last": "Shalev-Shwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amnon", |
|
"middle": [], |
|
"last": "Shashua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Shoham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.05646" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Levine, Barak Lenz, Or Dagan, Dan Padnos, Or Sharir, Shai Shalev-Shwartz, Amnon Shashua, and Yoav Shoham. 2019. Sensebert: Driving some Sense into Bert. arXiv:1908.05646 (2019).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Linking Named Entities across Languages Using Multilingual Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Jose", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Elvys Linhares Pontes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Moreno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Doucet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the ACM/IEEE Joint Conference on Digital Libraries in 2020 (JCDL '20)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "329--332", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elvys Linhares Pontes, Jose G. Moreno, and Antoine Doucet. 2020. Linking Named Entities across Lan- guages Using Multilingual Word Embeddings. In Proceedings of the ACM/IEEE Joint Conference on Digital Libraries in 2020 (JCDL '20). Association for Computing Machinery, 329-332.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Knowledge Enhanced Contextual Word Representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Logan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vidur", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E. Peters, Mark Neumann, Robert L. Lo- gan, Roy Schwartz, Vidur Joshi, Sameer Singh, and Noah A. Smith. 2019. Knowledge Enhanced Con- textual Word Representations. In EMNLP. 43-54.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "WiC: the Word-in-Context Dataset for Evaluating Context-Sensitive Meaning Representations", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Taher Pilehvar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jose", |
|
"middle": [], |
|
"last": "Camacho-Collados", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1267--1273", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Taher Pilehvar and Jose Camacho- Collados. 2019. WiC: the Word-in-Context Dataset for Evaluating Context-Sensitive Meaning Represen- tations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers). 1267- 1273.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Morgan Funtowicz, and Jamie Brew. 2019. HuggingFace's Transformers: State-of-the-art Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.03771" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. HuggingFace's Trans- formers: State-of-the-art Natural Language Process- ing. arXiv:1910.03771 (2019).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Relation classification via convolutional deep neural network", |
|
"authors": [ |
|
{ |
|
"first": "Daojian", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siwei", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guangyou", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2335--2344", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daojian Zeng, Kang Liu, Siwei Lai, Guangyou Zhou, and Jun Zhao. 2014. Relation classification via convolutional deep neural network. In Proceedings of COLING 2014, the 25th International Confer- ence on Computational Linguistics: Technical Pa- pers. 2335-2344.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Representation examples for the relation classification problem proposed by Zeng et al. (2014) (a) and Baldini Soares et al. (2019) (b and c)." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "(c) and done by Baldini Soares et al. (2019). Note that in this case the input is modified by introducing extra tokens ([E1], [/E1], [E2], and [/E2] are added based on target words (Baldini Soares et al., 2019)) that help the system to point out the target words." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Confusion matrices for different position groups. Group" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Histograms of predicted values in the dev set." |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Precision/Recall curve for the development set for different threshold values." |
|
}, |
|
"FIGREF5": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Position distribution based on the target token distances." |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>strong neural language model (BERT) successfully</td></tr><tr><td>manage to classify the examples.</td></tr></table>", |
|
"text": "Accuracy, Precision, Recall and F1 results of participants and baselines. Results where split by type. General results are included in column 'Global'. All results were calculated by the task organisers(Breit et al., 2020) as participants have not access to test labels. Best performance for each global metric is marked in bold for automatic systems." |
|
} |
|
} |
|
} |
|
} |