|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:27:24.819651Z" |
|
}, |
|
"title": "Learning from Unlabelled Data for Clinical Semantic Textual Similarity", |
|
"authors": [ |
|
{ |
|
"first": "Yuxia", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne Victoria", |
|
"location": { |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Verspoor", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne Victoria", |
|
"location": { |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne Victoria", |
|
"location": { |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Domain pretraining followed by task finetuning has become the standard paradigm for NLP tasks, but requires in-domain labelled data for task fine-tuning. To overcome this, we propose to utilise unlabelled domain data by assigning pseudo-labels from a general model. We evaluate the approach on two clinical STS datasets, and achieve r = 0.80 on N2C2-STS. Further investigation reveals that if the data distribution of unlabelled sentence pairs is closer to the test data, we can obtain better performance. By leveraging a large general-purpose STS dataset and small-scale in-domain training data, we obtain further improvements to r = 0.90, a new SOTA.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Domain pretraining followed by task finetuning has become the standard paradigm for NLP tasks, but requires in-domain labelled data for task fine-tuning. To overcome this, we propose to utilise unlabelled domain data by assigning pseudo-labels from a general model. We evaluate the approach on two clinical STS datasets, and achieve r = 0.80 on N2C2-STS. Further investigation reveals that if the data distribution of unlabelled sentence pairs is closer to the test data, we can obtain better performance. By leveraging a large general-purpose STS dataset and small-scale in-domain training data, we obtain further improvements to r = 0.90, a new SOTA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Semantic textual similarity (STS) measures the degree of semantic equivalence between two text snippets, based on a graded numerical value, with applications including question answering (Yadav et al., 2020) , duplicate detection (Poerner and Sch\u00fctze, 2019) , and entity linking (Zhou et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 207, |
|
"text": "(Yadav et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 257, |
|
"text": "(Poerner and Sch\u00fctze, 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 298, |
|
"text": "(Zhou et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Modern pretrained language models have achieved impressive results for general STS (Devlin et al., 2019) . However in low-resource domains without in-domain labelled data, results are generally lower (Wang et al., 2020b) . In the clinical domain in particular, annotation requires medical experts (Wang et al., 2018; Romanov and Shivade, 2018) , meaning that labelled datasets are generally small, hampering clinical STS.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 104, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 220, |
|
"text": "(Wang et al., 2020b)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 316, |
|
"text": "(Wang et al., 2018;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 343, |
|
"text": "Romanov and Shivade, 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We address the question of how to apply pretrained language models to such domain-specific tasks where there is little or no labelled data, focusing specifically on the task of clinical STS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Employing a general STS model generally yields poor results over technical domains due to covariate shift. To bridge this gap, a standard approach is to pretrain the LM on in-domain text, such as Clin-icalBERT (Alsentzer et al., 2019) using MIMIC-III (Johnson et al., 2016) . However, existing research has tended to estimate effectiveness under the fine-tuning setting, rather than via inference tasks (Peng et al., 2019; Wang et al., 2020b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 234, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 273, |
|
"text": "MIMIC-III (Johnson et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 403, |
|
"end": 422, |
|
"text": "(Peng et al., 2019;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 442, |
|
"text": "Wang et al., 2020b)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we first evaluate domain pretraining approaches for clinical STS, with no labelled data. Based on the assumption that general STS models trained on large-scale STS datasets will perform reasonably well on clinical sentence pairs (Section 4), we then experiment with learning from the pseudo-labelled data (Section 5).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Experimental results show both domain pretraining and pseudo-labelled data fine-tuning improve clinical STS, and the combination of the two achieves the best performance of r = 0.80 on N2C2-STS (Section 6.3). Further analysis shows that the score distribution and volume of pseudolabelled pairs influence the performance of finetuning. We also find that training for more iterations leads to minor improvements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The paper makes three major contributions: (1) we propose a simple pseudo-training method, and show it to perform well on clinical STS; (2) we evaluate several existing approaches to clinical STS in a zero-shot setting, and benchmark against our method; and (3) we achieve state-of-the-art results of r = 0.90 for N2C2-STS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The general approach to domain-specific task modelling is: (1) pretrain a language model (LM) on a large volume of open-domain text (Devlin et al., 2019; Liu et al., 2019) ; and (2) fine-tune on domainspecific text and task-specific labelled data (Gururangan et al., 2020; Peng et al., 2019) . For this approach, however, domain-specific labelled data is required, an assumption that we seek to relax.", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 153, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 171, |
|
"text": "Liu et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 272, |
|
"text": "(Gururangan et al., 2020;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 291, |
|
"text": "Peng et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For STS, in the absence of labelled data, the simplest approach is to calculate the cosine similarity between the CLS-vectors of two sentences or averaged last-layer embeddings, but this tends to perform poorly, even worse than averaged GloVe (Pennington et al., 2014) embeddings. SBERT (Reimers and Gurevych, 2019) proposed to use a Siamese structure based on BERT to learn sentence representations, where they fine-tuned the model over general NLI data, and continued to fine-tune on general STS data (STS-B) (Cer et al., 2017) . In this work, we experiment with this approach specifically in the clinical context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 268, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 315, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 511, |
|
"end": 529, |
|
"text": "(Cer et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
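
{

"text": "To make the unsupervised baseline above concrete, the following is a minimal sketch (our illustration, not code from the cited works) of scoring a sentence pair by the cosine similarity of mean-pooled last-layer BERT embeddings, assuming the Hugging Face transformers library:\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModel\n\ntokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\nmodel = AutoModel.from_pretrained('bert-base-uncased')\n\ndef embed(sentence):\n    # Mean-pool the last-layer token embeddings into one sentence vector\n    inputs = tokenizer(sentence, return_tensors='pt')\n    with torch.no_grad():\n        hidden = model(**inputs).last_hidden_state  # (1, seq_len, hidden)\n    return hidden.mean(dim=1).squeeze(0)\n\ndef cosine_score(s1, s2):\n    return torch.nn.functional.cosine_similarity(embed(s1), embed(s2), dim=0).item()\n\nAs noted above, such untrained similarity scores form a weak baseline; SBERT instead learns the sentence representations through supervised fine-tuning.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "2"

},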
|
{ |
|
"text": "We select two available clinical STS benchmark datasets for evaluation: MedSTS (Wang et al., 2018) and N2C2-STS (Wang et al., 2020a) . The latter annotated 412 instances as new test bed, and updated train partition by labelling extra 574 instances and merging the former train and test cases (see Table 1 ). Our aim is to predict a score, given a sentence pair (S1, S2), closing to the gold label -a numerical value ranging from 0 to 5, where 0 refers to completely dissimilar semantics while 5 is completely equivalent in the meaning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 98, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 112, |
|
"end": 132, |
|
"text": "(Wang et al., 2020a)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 297, |
|
"end": 304, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets and Tasks", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For example, S1: Discussed goals, risks, alternatives, advanced directives, and the necessity of other members of the surgical team participating in the procedure with the patient. S2: Discussed risks, goals, alternatives, advance directives, and the necessity of other members of the healthcare team participating in the procedure with the patient and his mother. Label: 4, as the two sentences are mostly equivalent and differ only in unimportant details (in bold).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Tasks", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Pearson's correlation (r) and Spearman's correlation (\u03c1) between the predicted and gold standard scores are used as evaluation metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Tasks", |
|
"sec_num": "3" |
|
}, |
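
{

"text": "For concreteness, the two metrics can be computed as follows (a minimal sketch of the evaluation, assuming scipy is available):\n\nfrom scipy.stats import pearsonr, spearmanr\n\ndef evaluate(predictions, gold_scores):\n    # Pearson's r and Spearman's rho between predicted and gold scores\n    r, _ = pearsonr(predictions, gold_scores)\n    rho, _ = spearmanr(predictions, gold_scores)\n    return r, rho",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Datasets and Tasks",

"sec_num": "3"

},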
|
{ |
|
"text": "In modern NLP, large amounts of high-quality training data are a key element in building successful systems (Aharoni and Goldberg, 2020) . This is also the case with STS, where additional training data has been shown to improve accuracy (Wang et al., 2020b) . However, domain shifts inevitably lead to performance drops (Gururangan et al., 2020). Therefore, we ask: RQ1 Can large-scale generaldomain labelled STS data be transferred to train Effect of Larger General STS Corpus. We source general-domain labelled data from: (1) SemEval-STS shared tasks 2012-2017 (Agirre et al., 2012 (Agirre et al., , 2013 (Agirre et al., , 2014 (Agirre et al., , 2015 (Agirre et al., , 2016 Cer et al., 2017) ; and SICK-R (Marelli et al., 2014) . This results in a total of 28,518 labelled sentence pairs, which we refer to as \"STS-G\". We adapt a BERT encoder connected to a linear regression layer to fine-tune a general-domain STS model using STS-G, where the CLS-vector is used to represent the sentence pair (CLS-BERT). We compare this with a model trained only on STS-B. We evaluate both models on STS-B dev (same setup as Section 6.1).", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 136, |
|
"text": "(Aharoni and Goldberg, 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 257, |
|
"text": "(Wang et al., 2020b)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 583, |
|
"text": "(Agirre et al., 2012", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 606, |
|
"text": "(Agirre et al., , 2013", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 629, |
|
"text": "(Agirre et al., , 2014", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 652, |
|
"text": "(Agirre et al., , 2015", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 653, |
|
"end": 675, |
|
"text": "(Agirre et al., , 2016", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 693, |
|
"text": "Cer et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 707, |
|
"end": 729, |
|
"text": "(Marelli et al., 2014)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Observations", |
|
"sec_num": "4" |
|
}, |
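
{

"text": "As an illustration of the CLS-BERT architecture described above (a sketch under our own naming, not the exact implementation), using the Hugging Face transformers library:\n\nimport torch.nn as nn\nfrom transformers import AutoModel\n\nclass CLSBertRegressor(nn.Module):\n    # BERT encoder with a linear regression head over the CLS vector;\n    # the two sentences are encoded jointly, e.g. via tokenizer(s1, s2, ...)\n    def __init__(self, name='bert-base-uncased'):\n        super().__init__()\n        self.encoder = AutoModel.from_pretrained(name)\n        self.regressor = nn.Linear(self.encoder.config.hidden_size, 1)\n\n    def forward(self, input_ids, attention_mask, token_type_ids=None):\n        out = self.encoder(input_ids=input_ids,\n                           attention_mask=attention_mask,\n                           token_type_ids=token_type_ids)\n        cls_vec = out.last_hidden_state[:, 0]  # CLS token representation\n        return self.regressor(cls_vec).squeeze(-1)\n\nTraining minimises a regression loss (e.g. mean squared error) against the gold similarity scores.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Observations",

"sec_num": "4"

},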
|
{ |
|
"text": "For clinical STS, we employ a hierarchical convolution (HConv) model based on BERT (updating parameters of the last four layers), where the model is first fine-tuned with STS-B, then N2C2-STS is augmented by back-translation (Wang et al., 2020b) . The model architecture and hyperparameter settings are the same as the original paper, such that we merely replace STS-B with STS-G, and observe that more training data improves clincial STS.", |
|
"cite_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 245, |
|
"text": "(Wang et al., 2020b)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Observations", |
|
"sec_num": "4" |
|
}, |
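
{

"text": "The back-translation augmentation can be outlined as follows (only a sketch; to_fn and from_fn are placeholders for any machine translation system, and the exact protocol follows Wang et al. (2020b)):\n\ndef back_translate(sentences, to_fn, from_fn):\n    # Paraphrase each sentence by translating to a pivot language and back\n    return [from_fn(to_fn(s)) for s in sentences]\n\ndef augment(pairs, labels, to_fn, from_fn):\n    # Pair each back-translated first sentence with the original partner,\n    # keeping the original similarity label\n    bts = back_translate([s1 for s1, _ in pairs], to_fn, from_fn)\n    new_pairs = [(bt, s2) for bt, (_, s2) in zip(bts, pairs)]\n    return pairs + new_pairs, labels + labels",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Observations",

"sec_num": "4"

},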
|
{ |
|
"text": "As shown in Table 2 , the extra training data in STS-G results in an increase in r of up to .028, in the case of HConvBERT (Wang et al., 2020b) , resulting in a new SOTA of r = .902.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 143, |
|
"text": "(Wang et al., 2020b)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Observations", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Discussion. Though general-domain data lacks clinical information, the model clearly benefits from the extra out-of-domain training data (answering RQ1). This inspires us to rethink the clinical STS task as a combination of domain-specific text understanding and domain-invariant task learning, leading to the question: can the two aspects be learned separately? That is, can task learning take place via large volumes of general-domain labelled data, and domain-specific characteristics be learned from silver-standard labelled domain data, such as low-quality clinical sentence pairs labelled by a general STS model?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Observations", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Next, we investigate the use of pseudo-labelled clinical data based on the general STS model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Gururangan et al. 2020illustrate that if the data distribution of the text used for pretraining is more similar to the task data, the performance will be better. Based on this, we propose a distribution-centric strategy for generating and selecting sentence pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pseudo-Labelled Sentence Pairs", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Generation. Two data sources -MIMIC-III clinical notes and N2C2-STS training data (ignoring labels) -are used to generate unlabelled sentence pairs. We sample 10,000 discharge summaries from MIMIC-III, which we segment into 27 parts based on section subtitles. Of these, we select five sections we consider to be most related to the N2C2-STS task: diagnosis, medications, history of present illness, follow-up instructions and physical exam. After sentence segmentation using SpaCy (Honnibal and Montani, 2017) , we randomly sample sentence pairs from each section partition.", |
|
"cite_spans": [ |
|
{ |
|
"start": 482, |
|
"end": 510, |
|
"text": "(Honnibal and Montani, 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pseudo-Labelled Sentence Pairs", |
|
"sec_num": "5.1" |
|
}, |
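
{

"text": "A minimal sketch of the pair generation step under the assumptions stated above (file handling and section extraction are elided, and the helper names are our own):\n\nimport random\nimport spacy\n\nnlp = spacy.load('en_core_web_sm')  # used only for sentence segmentation\n\ndef sentences(section_texts):\n    # Segment each section's text into sentences with spaCy\n    sents = []\n    for doc in nlp.pipe(section_texts):\n        sents.extend(s.text.strip() for s in doc.sents if s.text.strip())\n    return sents\n\ndef sample_pairs(section_texts, n_pairs):\n    # Randomly pair sentences drawn from the same section partition\n    sents = sentences(section_texts)\n    return [tuple(random.sample(sents, 2)) for _ in range(n_pairs)]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pseudo-Labelled Sentence Pairs",

"sec_num": "5.1"

},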
|
{ |
|
"text": "Labelling and Sampling. We take the CLS-BERT model trained on STS-G, and generate a score for all sentence pairs. To balance the data, we group into 5 equal-width bands based on score: [0.0, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0] and (4.0, 5.0]. We use all pairs whose assigned score is above 3.0, and sample N pairs from the other three intervals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pseudo-Labelled Sentence Pairs", |
|
"sec_num": "5.1" |
|
}, |
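
{

"text": "The band-balancing step can be sketched as follows (a simplified illustration; score_fn stands in for the CLS-BERT model trained on STS-G):\n\nimport math\nimport random\n\ndef pseudo_label_and_sample(pairs, score_fn, n_per_band):\n    # Score every pair with the general STS model, then bucket by score:\n    # bands 0..4 correspond to [0,1], (1,2], (2,3], (3,4], (4,5]\n    bands = {i: [] for i in range(5)}\n    for s1, s2 in pairs:\n        score = score_fn(s1, s2)\n        bands[min(4, max(0, math.ceil(score) - 1))].append(((s1, s2), score))\n    selected = []\n    for i, members in bands.items():\n        if i >= 3:  # keep all pairs whose score is above 3.0\n            selected.extend(members)\n        else:       # downsample the over-represented low-score bands\n            selected.extend(random.sample(members, min(n_per_band, len(members))))\n    return selected",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pseudo-Labelled Sentence Pairs",

"sec_num": "5.1"

},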
|
{ |
|
"text": "We fine-tune the model over the resulting pseudolabelled data, repeat the process of labelling and sampling, and further fine-tune the model on the second set of pseudo-labelled data. 229622 211405 54517 4015 441 STS-PL 4015 4015 4015 4015 441 100k 45602 42479 10996 839 84 STS-PS 1500 1500 1500 839 84 500k 399975 81282 16841 1468 434 STS-DP 1468 1468 1468 1468 434 Table 3 : Score distribution of 500k sentence pairs used for STS-PL and 100k pairs used for STS-PS. STS-DP is based on a domain-pretrained model (see Section 6.3).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 412, |
|
"text": "229622 211405 54517 4015 441 STS-PL 4015 4015 4015 4015 441 100k 45602 42479 10996 839 84 STS-PS 1500 1500 1500 839 84 500k 399975 81282 16841 1468 434 STS-DP 1468 1468 1468 1468 434 Table 3", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Iterative Training", |
|
"sec_num": "5.2" |
|
}, |
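
{

"text": "In outline, the iterative procedure reads as follows (our sketch; fine_tune is a placeholder for the standard regression fine-tuning loop, model.predict for scoring a pair, and pseudo_label_and_sample is the routine sketched in Section 5.1):\n\ndef iterative_training(model, unlabelled_pairs, fine_tune, rounds=2, n_per_band=4015):\n    # Each round: pseudo-label with the current model, rebalance by score band,\n    # then fine-tune on the resulting silver-standard data\n    for _ in range(rounds):\n        data = pseudo_label_and_sample(unlabelled_pairs, model.predict, n_per_band)\n        model = fine_tune(model, data)\n    return model",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Iterative Training",

"sec_num": "5.2"

},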
|
{ |
|
"text": "We first evaluate existing approaches for clinical STS in the zero-shot setting, and compare with our method. Then we analyse the impact of the volume of sampled instances and data distribution on the fine-tuning quality. We experiment with the number of iterations in Section 6.5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We evaluate over MedSTS and N2C2-STS. As gathering naturally occurring pairs of sentences with different degrees of semantic similarity is very challenging (Wang et al., 2018) , only 84 instances in (4.0, 5.0] are sampled from a group of 100k unlabelled sentence pairs (see Table 3 ). To increase the number of instances with high similarity, another group of 500k unlabelled sentence pairs is generated from discharge summaries. Limiting to cases above 3.0, (1) \"STS-PS\" (Pseudo-labelled Small) = 5,423 pairs, is sampled from 100k based on N = 1500; and (2) \"STS-PL\" (Pseudo-labelled Large) = 16,501 pairs, is sampled from 500k based on N = 4015. Unless otherwise indicated, pseudo labelling is based on CLS-BERT base -STS-G (see Section 4). All models are trained with a batch size of 16, learning rate of 2e-5, and 3 epochs with linear scheduler setting warmup proportion of 0.1 of fine-tuning. For all CLS-BERT models, we update all 12 layers, and for HConvBERT we update the last 4 layers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 175, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 281, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "6.1" |
|
}, |
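
{

"text": "The optimisation settings above map onto standard Hugging Face utilities roughly as follows (a sketch under the stated hyperparameters; train_loader and the loss computation are assumed):\n\nimport torch\nfrom transformers import get_linear_schedule_with_warmup\n\ndef make_optim(model, train_loader, epochs=3, lr=2e-5, warmup_prop=0.1):\n    # AdamW with a linear decay schedule and warmup over the first 10% of steps\n    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)\n    total_steps = len(train_loader) * epochs\n    scheduler = get_linear_schedule_with_warmup(\n        optimizer,\n        num_warmup_steps=int(warmup_prop * total_steps),\n        num_training_steps=total_steps,\n    )\n    return optimizer, scheduler",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "6.1"

},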
|
{ |
|
"text": "We perform experiments over three models (SBERT, CLS-BERT, and HConvBERT), two pretraining configurations (general and clinical), and four training datasets (general gold-labelled STS-B and STS-G, clinical pseudo-labelled STS-PL and STS-PS).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Results are presented in Tables 4 and 5 model descriptors -\"base\" and \"clinical\" -correspond to the two pretraining configurations, general and clinical. The \"Data\" column indicates the corpus used for fine-tuning, and A+B means that the model is first fine-tuned on A then fine-tuned on B. The model using general (\"base\") pretraining and fine-tuning only on STS-B or STS-G is referred to as the \"general STS model\". Both pretraining using in-domain text (\"clinical\") and fine-tuning on pseudo-labelled data (+STS-PS/STS-PL) improve performance over the general STS model, with fine-tuning on pseudo-labelled data generally performing better than domain pretraining, in addition to being computationally cheaper.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 39, |
|
"text": "Tables 4 and 5", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "It may be argued that the performance improvement is gained simply as a result of using an enlarged data set for fine-tuning, instead of learning domain characteristics from clinical pseudolabelled data. However, for both datasets, and under CLS-BERT base and HConvBERT base , comparing results using: (1) STS-B with size of 5,749;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "(2) STS-B + STS-PS with size of 11,172 (5,749 + 5.423); and (3) STS-G with size of 28,518, we find that both (2) and (3) have higher r and \u03c1 than (1), suggesting that enlarging the data size for fine-tuning is beneficial to improving performance. Simutaneously, (2) always performs much better than (3) though (3) is larger and has more gold la- bels; this indicates the gains are mainly attributable to learned domain characteristics rather than merely increased data. Moreover, based on the results for CLS-BERT base and HConvBERT base using STS-PL and STS-PS, it would appear that the amount and score distribution of the pseudo-labelled data influences fine-tuning performance, which we return to investigate further in Section 6.4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We adapt CLS-BERT clinical -STS-G to predict scores for 500,000 pairs, generating STS-DP (6,306) after sampling as shown in Table 3 . We continue to fine-tune CLS-BERT clinical -STS-G using STS-DP, boosting the performance to r = .803 and \u03c1 = .788, from r = .788 and \u03c1 = .768.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 131, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Combination of Domain Pretraining (DP) and Fine-tuning", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "In this section, we investigate how data source, score distribution -percentage of instances distributed in five score interval, and the volume of sampled instances influence fine-tuning performance. Based on CLS-BERT base with STS-G, we continue to fine-tune over five different groups of data: (1) N2C2-STS training data without goldstandard labels, where the score distribution of pseudo labels is 0. and with the score distribution as (1); (3) uniformly sampled from STS-PL with 330 pairs in each score interval; (4) proportionally sampled from STS-PL at a ratio of 1/10 for each score interval; and (5) full STS-PL. Comparing Experiments 2, 3 and 4 in Table 6 , which have same data source and size (1.6k), and differ only in score distribution, we observe only minor performance differences. Experiments 1 and 2 rely on different sources, where Experiment 1 has the same source as the test data, and performs much better than Experiment 2. An aligned data source therefore is the optimal scenario. Looking at Experiments 4 and 5, where the difference is in the amount of sampled data, it is clear that more instances brings further improvements. But Could performance be improved consistently with increased pseudo-labelled data?", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 657, |
|
"end": 664, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Data Distribution and Amount", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "To answer this question, we proportionally sampled from STS-PL by ratio of 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0, and also sampled from 500k unlabelled sentence pairs setting N = 5000, 6000, 7000, 7500, 8000, resulting in 12 subsets in sizes ranging from 1,648 to 28,456, for fine-tuning based on CLS-BERT base -STS-G. As shown in Figure 1 , 1 from 0 to 16,501, both r and \u03c1 gradually increase, and then fluctuate around 0.77 and 0.76 resp. This reveals the trade-off between increasing the number of pseudo-labelled fine-tuning instances and error propagation due to cumulative noise.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 327, |
|
"end": 335, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Data Distribution and Amount", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "Based on CLS-BERT base with STS-G, we investigate the impact of multiple iterations of fine-tuning Table 7 : Results on N2C2-STS through differing number of iterations of iterative fine-tuning. Amount = number of fine-tuning instances.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 106, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Number of Iterations", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "in Table 7 , as introduced in Section 5.2. The performance boost from additional iterations is modest. Increasing iterations from 2 to 3, the accuracy does not improve, which is consistent with the findings in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 218, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Number of Iterations", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "In this paper, we have proposed a simple method of pseudo-labelling in-domain data and iterative training, to improve clinical STS. Evaluation over two clinical STS datasets demonstrates the effectiveness of the approach, and domain pretraining is shown to achieve further improvements. Further investigation indicated that keeping the distribution of pseudo-labelled instances close to that of the in-domain data improves performance. We also observed modest improvements through more iterations of iterative training. Our work provides an alternative approach to employing domain-specific unlabelled data to support clinical STS. As future work, we plan to explore the application of our method to other model structures such as SBERT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Random sampling affects the model performance, particularly when the data size is less than 5000, so we sampled five times for 1648, 3300 and 4948, so these results are averages over multiple samples of the given size.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported in part by China Scholarship Council (CSC). We are grateful to the anonymous reviewers for their insightful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Semeval-2015 task 2: Semantic textual similarity, english, spanish and pilot on interpretability", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carmen", |
|
"middle": [], |
|
"last": "Banea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inigo", |
|
"middle": [], |
|
"last": "Lopez-Gazpio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Montse", |
|
"middle": [], |
|
"last": "Maritxalar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 9th international workshop on semantic evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "252--263", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Carmen Banea, Claire Cardie, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Inigo Lopez-Gazpio, Montse Maritxalar, Rada Mihalcea, et al. 2015. Semeval-2015 task 2: Seman- tic textual similarity, english, spanish and pilot on interpretability. In Proceedings of the 9th interna- tional workshop on semantic evaluation (SemEval 2015), pages 252-263.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Semeval-2014 task 10: Multilingual semantic textual similarity", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carmen", |
|
"middle": [], |
|
"last": "Banea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "German", |
|
"middle": [], |
|
"last": "Rigau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janyce", |
|
"middle": [], |
|
"last": "Wiebe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th international workshop on semantic evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Carmen Banea, Claire Cardie, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2014. Semeval-2014 task 10: Multilingual semantic textual similarity. In Proceedings of the 8th international workshop on semantic evaluation (SemEval 2014), pages 81-91.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Semeval-2016 task 1: Semantic textual similarity, monolingual and cross-lingual evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carmen", |
|
"middle": [], |
|
"last": "Banea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "German", |
|
"middle": [], |
|
"last": "Rigau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janyce", |
|
"middle": [], |
|
"last": "Wiebe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "497--511", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Carmen Banea, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2016. Semeval-2016 task 1: Semantic textual similarity, monolingual and cross-lingual evaluation. In Proceedings of the 10th International Workshop on Semantic Evalua- tion (SemEval-2016), pages 497-511.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "* sem 2013 shared task: Semantic textual similarity", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Main Conference and the Shared Task: Semantic Textual Similarity", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "32--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Daniel Cer, Mona Diab, Aitor Gonzalez- Agirre, and Weiwei Guo. 2013. * sem 2013 shared task: Semantic textual similarity. In Second Joint Conference on Lexical and Computational Seman- tics (* SEM), Volume 1: Proceedings of the Main Conference and the Shared Task: Semantic Textual Similarity, pages 32-43.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Semeval-2012 task 6: A pilot on semantic textual similarity", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the First Joint Conference on Lexical and Computational Semantics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "385--393", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Mona Diab, Daniel Cer, and Aitor Gonzalez-Agirre. 2012. Semeval-2012 task 6: A pi- lot on semantic textual similarity. In Proceedings of the First Joint Conference on Lexical and Computa- tional Semantics-Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Pro- ceedings of the Sixth International Workshop on Se- mantic Evaluation, pages 385-393. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Unsupervised domain clusters in pretrained language models", |
|
"authors": [ |
|
{ |
|
"first": "Roee", |
|
"middle": [], |
|
"last": "Aharoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7747--7763", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roee Aharoni and Yoav Goldberg. 2020. Unsupervised domain clusters in pretrained language models. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 7747- 7763, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Publicly available clinical BERT embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Alsentzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Boag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Hung", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jindi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Mcdermott", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd Clinical Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "72--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Alsentzer, John Murphy, William Boag, Wei- Hung Weng, Di Jindi, Tristan Naumann, and Matthew McDermott. 2019. Publicly available clini- cal BERT embeddings. In Proceedings of the 2nd Clinical Natural Language Processing Workshop, pages 72-78, Minneapolis, Minnesota, USA.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "SemEval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I\u00f1igo", |
|
"middle": [], |
|
"last": "Lopez-Gazpio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Cer, Mona Diab, Eneko Agirre, I\u00f1igo Lopez- Gazpio, and Lucia Specia. 2017. SemEval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 1-14, Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Don't stop pretraining: Adapt language models to domains and tasks", |
|
"authors": [ |
|
{ |
|
"first": "Ana", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Marasovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8342--8360", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.740" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "2017. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Montani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Ines Montani. 2017. spaCy 2: Natural language understanding with Bloom embed- dings, convolutional neural networks and incremen- tal parsing. To appear.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Mimiciii, a freely accessible critical care database", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Alistair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Pollard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H Lehman", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengling", |
|
"middle": [], |
|
"last": "Li-Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Ghassemi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Moody", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [ |
|
"Anthony" |
|
], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger G", |
|
"middle": [], |
|
"last": "Celi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Scientific data", |
|
"volume": "3", |
|
"issue": "1", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alistair EW Johnson, Tom J Pollard, Lu Shen, H Lehman Li-Wei, Mengling Feng, Moham- mad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G Mark. 2016. Mimic- iii, a freely accessible critical care database. Scien- tific data, 3(1):1-9.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Semeval-2014 task 1: Evaluation of compositional distributional semantic models on full sentences through semantic relatedness and textual entailment", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Marelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raffaella", |
|
"middle": [], |
|
"last": "Bernardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Menini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Zamparelli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th international workshop on semantic evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Marelli, Luisa Bentivogli, Marco Baroni, Raf- faella Bernardi, Stefano Menini, and Roberto Zam- parelli. 2014. Semeval-2014 task 1: Evaluation of compositional distributional semantic models on full sentences through semantic relatedness and textual entailment. In Proceedings of the 8th international workshop on semantic evaluation (SemEval 2014), pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets", |
|
"authors": [ |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shankai", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 18th BioNLP Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "58--65", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5006" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yifan Peng, Shankai Yan, and Zhiyong Lu. 2019. Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets. In Proceedings of the 18th BioNLP Workshop and Shared Task, pages 58- 65, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language process- ing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Multiview domain adapted sentence embeddings for lowresource unsupervised duplicate question detection", |
|
"authors": [ |
|
{ |
|
"first": "Nina", |
|
"middle": [], |
|
"last": "Poerner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1630--1641", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1173" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nina Poerner and Hinrich Sch\u00fctze. 2019. Multi- view domain adapted sentence embeddings for low- resource unsupervised duplicate question detection. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 1630- 1641, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Sentence-BERT: Sentence embeddings using Siamese BERTnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3982--3992", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1410" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- BERT: Sentence embeddings using Siamese BERT- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 3982-3992, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Lessons from natural language inference in the clinical domain", |
|
"authors": [ |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Romanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaitanya", |
|
"middle": [], |
|
"last": "Shivade", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1586--1596", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1187" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexey Romanov and Chaitanya Shivade. 2018. Lessons from natural language inference in the clin- ical domain. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 1586-1596, Brussels, Belgium. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "MedSTS: a resource for clinical semantic textual similarity. Language Resources and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Yanshan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naveed", |
|
"middle": [], |
|
"last": "Afzal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunyang", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liwei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feichen", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Majid", |
|
"middle": [], |
|
"last": "Rastegar-Mojarad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongfang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanshan Wang, Naveed Afzal, Sunyang Fu, Liwei Wang, Feichen Shen, Majid Rastegar-Mojarad, and Hongfang Liu. 2018. MedSTS: a resource for clini- cal semantic textual similarity. Language Resources and Evaluation, pages 1-16.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Overview of the 2019 n2c2/ohnlp track on clinical semantic textual similarity", |
|
"authors": [ |
|
{ |
|
"first": "Yanshan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunyang", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongfang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanshan Wang, Sunyang Fu, and Hongfang Liu. 2020a. Overview of the 2019 n2c2/ohnlp track on clinical semantic textual similarity. Preprint.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Evaluating the utility of model configurations and data augmentation on clinical semantic textual similarity", |
|
"authors": [ |
|
{ |
|
"first": "Yuxia", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Verspoor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "105--111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuxia Wang, Fei Liu, Karin Verspoor, and Timothy Baldwin. 2020b. Evaluating the utility of model configurations and data augmentation on clinical se- mantic textual similarity. In Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Pro- cessing, pages 105-111, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Unsupervised alignment-based iterative evidence retrieval for multi-hop question answering", |
|
"authors": [ |
|
{ |
|
"first": "Vikas", |
|
"middle": [], |
|
"last": "Yadav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4514--4525", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.414" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikas Yadav, Steven Bethard, and Mihai Surdeanu. 2020. Unsupervised alignment-based iterative evi- dence retrieval for multi-hop question answering. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4514- 4525, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Improving candidate generation for low-resource cross-lingual entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Shuyan", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "Rijhwani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Wieting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "109--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuyan Zhou, Shruti Rijhwani, John Wieting, Jaime Carbonell, and Graham Neubig. 2020. Improving candidate generation for low-resource cross-lingual entity linking. Transactions of the Association for Computational Linguistics, 8:109-124.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"text": "Clinical STS datasets. Train and Test Size = number of text pairs. Len = mean sentence length in tokens.", |
|
"content": "<table><tr><td>Dataset</td><td colspan=\"3\">Len Train Size Test Size</td></tr><tr><td>MedSTS</td><td>25.4</td><td>750</td><td>318</td></tr><tr><td colspan=\"2\">N2C2-STS 19.3</td><td>1642</td><td>412</td></tr><tr><td colspan=\"2\">Eval set / Model Data</td><td/><td>r</td><td>\u03c1</td></tr><tr><td>STS-B dev:</td><td/><td/><td/></tr><tr><td>CLS-BERT</td><td>STS-B train</td><td/><td colspan=\"2\">.900 .896</td></tr><tr><td>CLS-BERT</td><td>STS-G</td><td/><td colspan=\"2\">.928 .927</td></tr><tr><td>N2C2-STS test:</td><td/><td/><td/></tr><tr><td>HConvBERT</td><td colspan=\"4\">STS-B train + N2C2-STS train .894 .830</td></tr><tr><td>HConvBERT</td><td colspan=\"2\">STS-G + N2C2-STS train</td><td colspan=\"2\">.902 .836</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td>: Results for CLS-BERT base -STS-G on N2C2-</td></tr><tr><td>STS based on fine-tuning on different datasets. Exp.1</td></tr><tr><td>is N2C2-STS train data removing gold-standard labels,</td></tr><tr><td>Exp.2 is sampled from STS-PL with same score distri-</td></tr><tr><td>bution as Exp.1, Exp.3 is uniformly sampled from STS-</td></tr><tr><td>PL, Exp.4 is proportionally sampled from STS-PL and</td></tr><tr><td>Exp.5 is full STS-PL.</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |