|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:54:40.401264Z" |
|
}, |
|
"title": "SpanBERT: Improving Pre-training by Representing and Predicting Spans", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Washington", |
|
"location": { |
|
"settlement": "Seattle", |
|
"region": "WA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Princeton University", |
|
"location": { |
|
"settlement": "Princeton", |
|
"region": "NJ" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Washington", |
|
"location": { |
|
"settlement": "Seattle", |
|
"region": "WA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Washington", |
|
"location": { |
|
"settlement": "Seattle", |
|
"region": "WA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present SpanBERT, a pre-training method that is designed to better represent and predict spans of text. Our approach extends BERT by (1) masking contiguous random spans, rather than random tokens, and (2) training the span boundary representations to predict the entire content of the masked span, without relying on the individual token representations within it. SpanBERT consistently outperforms BERT and our better-tuned baselines, with substantial gains on span selection tasks such as question answering and coreference resolution. In particular, with the same training data and model size as BERT large , our single model obtains 94.6% and 88.7% F1 on SQuAD 1.1 and 2.0 respectively. We also achieve a new state of the art on the OntoNotes coreference resolution task (79.6% F1), strong performance on the TACRED relation extraction benchmark, and even gains on GLUE. 1", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present SpanBERT, a pre-training method that is designed to better represent and predict spans of text. Our approach extends BERT by (1) masking contiguous random spans, rather than random tokens, and (2) training the span boundary representations to predict the entire content of the masked span, without relying on the individual token representations within it. SpanBERT consistently outperforms BERT and our better-tuned baselines, with substantial gains on span selection tasks such as question answering and coreference resolution. In particular, with the same training data and model size as BERT large , our single model obtains 94.6% and 88.7% F1 on SQuAD 1.1 and 2.0 respectively. We also achieve a new state of the art on the OntoNotes coreference resolution task (79.6% F1), strong performance on the TACRED relation extraction benchmark, and even gains on GLUE. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Pre-training methods like BERT have shown strong performance gains using self-supervised training that masks individual words or subword units. However, many NLP tasks involve reasoning about relationships between two or more spans of text. For example, in extractive question answering (Rajpurkar et al., 2016) , de-termining that the ''Denver Broncos'' is a type of ''NFL team'' is critical for answering the question ''Which NFL team won Super Bowl 50?'' Such spans provide a more challenging target for self supervision tasks, for example, predicting ''Denver Broncos'' is much harder than predicting only ''Denver'' when you know the next word is ''Broncos''. In this paper, we introduce a spanlevel pretraining approach that consistently outperforms BERT, with the largest gains on span selection tasks such as question answering and coreference resolution.", |
|
"cite_spans": [ |
|
{ |
|
"start": 287, |
|
"end": 311, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We present SpanBERT, a pre-training method that is designed to better represent and predict spans of text. Our method differs from BERT in both the masking scheme and the training objectives. First, we mask random contiguous spans, rather than random individual tokens. Second, we introduce a novel span-boundary objective (SBO) so the model learns to predict the entire masked span from the observed tokens at its boundary. Span-based masking forces the model to predict entire spans solely using the context in which they appear. Furthermore, the SBO encourages the model to store this span-level information at the boundary tokens, which can be easily accessed during the fine-tuning stage. Figure 1 illustrates our approach.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 694, |
|
"end": 702, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To implement SpanBERT, we build on a welltuned replica of BERT, which itself substantially outperforms the original BERT. While building on our baseline, we find that pre-training on single segments, instead of two half-length segments with the next sentence prediction (NSP) objective, Figure 1 : An illustration of SpanBERT training. The span an American football game is masked. The SBO uses the output representations of the boundary tokens, x 4 and x 9 (in blue), to predict each token in the masked span. The equation shows the MLM and SBO loss terms for predicting the token, football (in pink), which as marked by the position embedding p 3 , is the third token from x 4 . considerably improves performance on most downstream tasks. Therefore, we add our modifications on top of the tuned single-sequence BERT baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 287, |
|
"end": 295, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Together, our pre-training process yields models that outperform all BERT baselines on a wide variety of tasks, and reach substantially better performance on span selection tasks in particular. Specifically, our method reaches 94.6% and 88.7% F1 on SQuAD 1.1 and 2.0 (Rajpurkar et al., 2016 (Rajpurkar et al., , 2018 , respectively-reducing error by as much as 27% compared with our tuned BERT replica. We also observe similar gains on five additional extractive question answering benchmarks (NewsQA, TriviaQA, SearchQA, HotpotQA, and Natural Questions). 2 SpanBERT also arrives at a new state of the art on the challenging CoNLL-2012 (''OntoNotes'') shared task for document-level coreference resolution, where we reach 79.6% F1, exceeding the previous top model by 6.6% absolute. Finally, we demonstrate that SpanBERT also helps on tasks that do not explicitly involve span selection, and show that our approach even improves performance on TACRED (Zhang et al., 2017) and GLUE .", |
|
"cite_spans": [ |
|
{ |
|
"start": 267, |
|
"end": 290, |
|
"text": "(Rajpurkar et al., 2016", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 316, |
|
"text": "(Rajpurkar et al., , 2018", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 501, |
|
"text": "(NewsQA,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 511, |
|
"text": "TriviaQA,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 512, |
|
"end": 521, |
|
"text": "SearchQA,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 531, |
|
"text": "HotpotQA,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 532, |
|
"end": 557, |
|
"text": "and Natural Questions). 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 951, |
|
"end": 971, |
|
"text": "(Zhang et al., 2017)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Whereas others show the benefits of adding more data and increasing model size (Lample and Conneau, 2019) , this work demonstrates the importance of designing good pre-training tasks and objectives, which can also have a remarkable impact.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 105, |
|
"text": "(Lample and Conneau, 2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "BERT ) is a self-supervised approach for pre-training a deep transformer encoder (Vaswani et al., 2017) , before fine-tuning it for a particular downstream task. BERT optimizes two training objectives-masked language model (MLM) and next sentence prediction (NSP)which only require a large collection of unlabeled text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 103, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: BERT", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Notation Given a sequence of word or subword tokens X = (x 1 , x 2 , . . . , x n ), BERT trains an encoder that produces a contextualized vector representation for each token:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: BERT", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "enc(x 1 , x 2 , . . . , x n ) = x 1 , x 2 , . . . , x n .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: BERT", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Masked Language Model Also known as a cloze test, MLM is the task of predicting missing tokens in a sequence from their placeholders. Specifically, a subset of tokens Y \u2286 X is sampled and substituted with a different set of tokens. In BERT's implementation, Y accounts for 15% of the tokens in X; of those, 80% are replaced with [MASK] , 10% are replaced with a random token (according to the unigram distribution), and 10% are kept unchanged. The task is to predict the original tokens in Y from the modified input. BERT selects each token in Y independently by randomly selecting a subset. In SpanBERT, we define Y by randomly selecting contiguous spans (Section 3.1).", |
|
"cite_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 335, |
|
"text": "[MASK]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: BERT", |
|
"sec_num": "2" |
|
}, |
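As a concrete illustration of the 80%/10%/10% replacement rule above, here is a minimal Python sketch (our illustration, not BERT's actual data pipeline); `mask_id` and `vocab_size` are assumed inputs, and the unigram distribution used for random replacement is approximated here as uniform.

```python
import random

def corrupt_token(token_id: int, mask_id: int, vocab_size: int) -> int:
    """Apply the 80/10/10 rule to one token chosen for prediction.

    80%: replace with [MASK]; 10%: replace with a random vocabulary token
    (approximated as uniform here instead of the unigram distribution);
    10%: keep the original token. The model must predict the original id
    in all three cases.
    """
    r = random.random()
    if r < 0.8:
        return mask_id
    elif r < 0.9:
        return random.randrange(vocab_size)
    else:
        return token_id
```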
|
{ |
|
"text": "Next Sentence Prediction The NSP task takes two sequences (X A , X B ) as input, and predicts whether X B is the direct continuation of X A . This is implemented in BERT by first reading X A from the corpus, and then (1) either reading X B from the point where X A ended, or (2) randomly sampling X B from a different point in the corpus. The two sequences are separated by a special [SEP] token. Additionally, a special [CLS] token is added to X A , X B to form the input, where the target of [CLS] is whether X B indeed follows X A in the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: BERT", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In summary, BERT optimizes the MLM and the NSP objectives by masking word pieces uniformly at random in data generated by the bi-sequence sampling procedure. In the next section, we will present our modifications to the data pipeline, masking, and pre-training objectives.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: BERT", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We present SpanBERT, a self-supervised pretraining method designed to better represent and predict spans of text. Our approach is inspired by BERT , but deviates from its bi-text classification framework in three ways. First, we use a different random process to mask spans of tokens, rather than individual ones. We also introduce a novel auxiliary objective-the SBO-which tries to predict the entire masked span using only the representations of the tokens at the span's boundary. Finally, SpanBERT samples a single contiguous segment of text for each training example (instead of two), and thus does not use BERT's next sentence prediction objective, which we omit.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Given a sequence of tokens X = (x 1 , x 2 , . . . , x n ), we select a subset of tokens Y \u2286 X by iteratively sampling spans of text until the masking budget (e.g., 15% of X) has been spent. At each iteration, we first sample a span length (number of words) from a geometric distribution \u223c Geo(p), which is skewed towards shorter spans. We then randomly (uniformly) select the starting point for the span to be masked. We always sample a sequence of complete words (instead of subword tokens) and the starting point must be the beginning of one word. Following preliminary trials, 3 we set Figure 2 : We sample random span lengths from a geometric distribution \u223c Geo(p = 0.2) clipped at max = 10. p = 0.2, and also clip at max = 10. This yields a mean span length of mean ( ) = 3.8. Figure 2 shows the distribution of span mask lengths.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 589, |
|
"end": 597, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 782, |
|
"end": 790, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Span Masking", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As in BERT, we also mask 15% of the tokens in total: replacing 80% of the masked tokens with [MASK], 10% with random tokens, and 10% with the original tokens. However, we perform this replacement at the span level and not for each token individually; that is, all the tokens in a span are replaced with [MASK] or sampled tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 303, |
|
"end": 309, |
|
"text": "[MASK]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Masking", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Span selection models (Lee et al., 2016 (Lee et al., , 2017 typically create a fixed-length representation of a span using its boundary tokens (start and end). To support such models, we would ideally like the representations for the end of the span to summarize as much of the internal span content as possible. We do so by introducing a span boundary objective that involves predicting each token of a masked span using only the representations of the observed tokens at the boundaries (Figure 1) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 39, |
|
"text": "(Lee et al., 2016", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 40, |
|
"end": 59, |
|
"text": "(Lee et al., , 2017", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 488, |
|
"end": 498, |
|
"text": "(Figure 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Formally, we denote the output of the transformer encoder for each token in the sequence by x 1 , . . . , x n . Given a masked span of tokens (x s , . . . , x e ) \u2208 Y , where (s, e) indicates its start and end positions, we represent each token x i in the span using the output encodings of the external boundary tokens x s\u22121 and x e+1 , as well as the position embedding of the target token p i\u2212s+1 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "y i = f (x s\u22121 , x e+1 , p i\u2212s+1 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where position embeddings p 1 , p 2 , . . . mark relative positions of the masked tokens with respect to the left boundary token x s\u22121 . We implement the representation function f (\u2022) as a 2-layer feed-forward network with GeLU activations (Hendrycks and Gimpel, 2016) and layer normalization (Ba et al., 2016) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 268, |
|
"text": "(Hendrycks and Gimpel, 2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 310, |
|
"text": "(Ba et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h 0 = [x s\u22121 ; x e+1 ; p i\u2212s+1 ] h 1 = LayerNorm (GeLU(W 1 h 0 )) y i = LayerNorm (GeLU(W 2 h 1 ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
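A minimal PyTorch sketch of the representation function f(·) described above: a 2-layer feed-forward network with GeLU activations and layer normalization over the two boundary encodings and a relative position embedding. The module and hyperparameter names are our assumptions; the text only specifies the equations and (in Section 4.2) 200-dimensional position embeddings.

```python
import torch
import torch.nn as nn

class SpanBoundaryHead(nn.Module):
    """f(x_{s-1}, x_{e+1}, p_{i-s+1}): 2-layer MLP with GeLU and LayerNorm."""

    def __init__(self, hidden_size: int, pos_dim: int = 200, max_span_len: int = 10):
        super().__init__()
        # relative positions are 1-based, up to the clipped maximum span length
        self.pos_emb = nn.Embedding(max_span_len + 2, pos_dim)
        self.w1 = nn.Linear(2 * hidden_size + pos_dim, hidden_size)
        self.w2 = nn.Linear(hidden_size, hidden_size)
        self.ln1 = nn.LayerNorm(hidden_size)
        self.ln2 = nn.LayerNorm(hidden_size)
        self.act = nn.GELU()

    def forward(self, x_left, x_right, rel_pos):
        # x_left, x_right: encoder outputs at the span boundaries, shape (batch, hidden)
        # rel_pos: 1-based position of the target token inside the span, shape (batch,)
        h0 = torch.cat([x_left, x_right, self.pos_emb(rel_pos)], dim=-1)
        h1 = self.ln1(self.act(self.w1(h0)))
        y = self.ln2(self.act(self.w2(h1)))
        return y  # fed to the (tied) vocabulary projection to predict the masked token
```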
|
{ |
|
"text": "We then use the vector representation y i to predict the token x i and compute the cross-entropy loss exactly like the MLM objective.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "SpanBERT sums the loss from both the span boundary and the regular masked language model objectives for each token x i in the masked span (x s , . . . , x e ), while reusing the input embedding (Press and Wolf, 2017) for the target tokens in both MLM and SBO:", |
|
"cite_spans": [ |
|
{ |
|
"start": 194, |
|
"end": 216, |
|
"text": "(Press and Wolf, 2017)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "L(x i ) = L MLM (x i ) + L SBO (x i ) = \u2212 log P (x i | x i ) \u2212 log P (x i | y i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Boundary Objective", |
|
"sec_num": "3.2" |
|
}, |
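To make the summed objective concrete, here is a small sketch (ours, not the reference implementation), assuming `mlm_logits` and `sbo_logits` are vocabulary-sized scores for the same masked positions, produced from the transformer output x_i and from y_i respectively through the shared (tied) input-embedding projection.

```python
import torch.nn.functional as F

def span_bert_loss(mlm_logits, sbo_logits, target_ids):
    """Sum of MLM and SBO cross-entropy losses over the masked positions.

    mlm_logits, sbo_logits: (num_masked, vocab_size) scores for the same
    masked positions; target_ids: (num_masked,) original token ids.
    """
    l_mlm = F.cross_entropy(mlm_logits, target_ids)
    l_sbo = F.cross_entropy(sbo_logits, target_ids)
    return l_mlm + l_sbo
```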
|
{ |
|
"text": "As described in Section 2, BERT's examples contain two sequences of text (X A , X B ), and an objective that trains the model to predict whether they are connected (NSP). We find that this setting is almost always worse than simply using a single sequence without the NSP objective (see Section 5 for further details). We conjecture that single-sequence training is superior to bi-sequence training with NSP because (a) the model benefits from longer full-length contexts, or (b) conditioning on, often unrelated, context from another document adds noise to the masked language model. Therefore, in our approach, we remove both the NSP objective and the two-segment sampling procedure, and simply sample a single contiguous segment of up to n = 512 tokens, rather than two half-segments that sum up to n tokens together.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Single-Sequence Training", |
|
"sec_num": "3.3" |
|
}, |
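A minimal sketch of the single-sequence data pipeline described above: each document is cut into contiguous blocks of up to 512 tokens, and blocks never cross document boundaries. `documents` (lists of token ids) is an assumed input.

```python
def make_blocks(documents, max_len=512):
    """Cut each document into contiguous blocks of up to max_len tokens.

    Blocks never cross document boundaries, so each training example is a
    single full-length segment (no NSP pairing of two half-segments).
    """
    blocks = []
    for doc in documents:  # doc: list of token ids for one document
        for i in range(0, len(doc), max_len):
            blocks.append(doc[i:i + max_len])
    return blocks
```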
|
{ |
|
"text": "In summary, SpanBERT pre-trains span representations by: (1) masking spans of full words using a geometric distribution based masking scheme (Section 3.1), (2) optimizing an auxiliary span-boundary objective (Section 3.2) in addition to MLM using a single-sequence data pipeline (Section 3.3). A procedural description can be found in Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Single-Sequence Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We evaluate on a comprehensive suite of tasks, including seven question answering tasks, coreference resolution, nine tasks in the GLUE benchmark , and relation extraction. We expect that the span selection tasks, question answering and coreference resolution, will particularly benefit from our span-based pre-training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Extractive Question Answering Given a short passage of text and a question as input, the task of extractive question answering is to select a contiguous span of text in the passage as the answer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We first evaluate on SQuAD 1.1 and 2.0 (Rajpurkar et al., 2016 (Rajpurkar et al., , 2018 , which have served as major question answering benchmarks, particularly for pre-trained models (Peters et al., 2018; . We also evaluate on five more datasets from the MRQA shared task (Fisch et al., 2019) 4 : NewsQA (Trischler et al., 2017) , SearchQA (Dunn et al., 2017) , TriviaQA (Joshi et al., 2017) , HotpotQA (Yang et al., 2018) , and Natural Questions . Because the MRQA shared task does not have a public test set, we split the development set in half to make new development and test sets. The datasets vary in both domain and collection methodology, making this collection a good test bed for evaluating whether our pre-trained models can generalize well across different data distributions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 62, |
|
"text": "(Rajpurkar et al., 2016", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 63, |
|
"end": 88, |
|
"text": "(Rajpurkar et al., , 2018", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 185, |
|
"end": 206, |
|
"text": "(Peters et al., 2018;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 294, |
|
"text": "(Fisch et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 330, |
|
"text": "(Trischler et al., 2017)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 361, |
|
"text": "(Dunn et al., 2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 393, |
|
"text": "(Joshi et al., 2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 424, |
|
"text": "(Yang et al., 2018)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Following BERT , we use the same QA model architecture for all the datasets. We first convert the passage [SEP] , pass it to the pre-trained transformer encoder, and train two linear classifiers independently on top of it for predicting the answer span boundary (start and end). For the unanswerable questions in SQuAD 2.0, we simply set the answer span to be the special token [CLS] for both training and testing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 111, |
|
"text": "[SEP]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
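The fine-tuning setup described above can be sketched as follows (an illustration, not the exact released code): two independent linear classifiers score every encoder position as the answer start or end, and unanswerable SQuAD 2.0 questions point both targets at the [CLS] position.

```python
import torch.nn as nn

class QAHead(nn.Module):
    """Two independent linear classifiers over encoder outputs for start/end."""

    def __init__(self, hidden_size: int):
        super().__init__()
        self.start = nn.Linear(hidden_size, 1)
        self.end = nn.Linear(hidden_size, 1)

    def forward(self, hidden_states):  # (batch, seq_len, hidden)
        start_logits = self.start(hidden_states).squeeze(-1)  # (batch, seq_len)
        end_logits = self.end(hidden_states).squeeze(-1)
        return start_logits, end_logits

# Input packing: [CLS] p_1 ... p_l [SEP] q_1 ... q_l [SEP]; for unanswerable
# SQuAD 2.0 questions, both start and end targets point at the [CLS] position.
```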
|
|
{ |
|
"text": "Coreference Resolution Coreference resolution is the task of clustering mentions in text which refer to the same real-world entities. We evaluate on the CoNLL-2012 shared task (Pradhan et al., 2012) for document-level coreference resolution. We use the independent version of the Joshi et al. (2019b) implementation of the higher-order coreference model . The document is divided into non-overlapping segments of a pre-defined length. 5 Each segment is encoded independently by the pre-trained transformer encoder, which replaces the original LSTM-based encoder. For each mention span x, the model learns a distribution P (\u2022) over possible antecedent spans Y : x,y) y \u2208Y e s(x,y ) The span pair scoring function s(x, y) is a feedforward neural network over fixed-length span representations and hand-engineered features over x and y:", |
|
"cite_spans": [ |
|
{ |
|
"start": 661, |
|
"end": 665, |
|
"text": "x,y)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "P (y) = e s(", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "s(x, y) = s m (x) + s m (y) + s c (x, y) s m (x) = FFNN m (g x ) s c (x, y) = FFNN c (g x , g y , \u03c6(x, y))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Here g x and g y denote the span representations, which are a concatenation of the two transformer output states of the span endpoints and an attention vector computed over the output representations of the token in the span. FFNN m and FFNN c represent two feedforward neural networks with one hidden layer, and \u03c6(x, y) represents the handengineered features (e.g., speaker and genre information). A more detailed description of the model can be found in .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
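A hedged PyTorch sketch of the span-pair scorer defined above; the hidden size and the exact feature handling are our assumptions, and the full coreference model includes additional components not shown here.

```python
import torch
import torch.nn as nn

def ffnn(in_dim, hidden_dim):
    # One-hidden-layer feed-forward scorer, as used for s_m and s_c.
    return nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, 1))

class PairScorer(nn.Module):
    """s(x, y) = s_m(x) + s_m(y) + s_c(x, y) over span representations g_x, g_y."""

    def __init__(self, span_dim, feat_dim, hidden_dim=1000):
        super().__init__()
        self.mention = ffnn(span_dim, hidden_dim)              # s_m
        self.pair = ffnn(2 * span_dim + feat_dim, hidden_dim)  # s_c

    def forward(self, g_x, g_y, phi_xy):
        s_m_x = self.mention(g_x)
        s_m_y = self.mention(g_y)
        s_c = self.pair(torch.cat([g_x, g_y, phi_xy], dim=-1))
        # P(y) is a softmax of these scores over the candidate antecedents Y
        return s_m_x + s_m_y + s_c
```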
|
{ |
|
"text": "Relation Extraction TACRED (Zhang et al., 2017 ) is a challenging relation extraction dataset. Given one sentence and two spans within itsubject and object-the task is to predict the relation between the spans from 42 pre-defined relation types, including no relation. We follow the entity masking schema from Zhang et al. (2017) GLUE The General Language Understanding Evaluation (GLUE) benchmark consists of 9 sentence-level classification tasks:", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 46, |
|
"text": "(Zhang et al., 2017", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 329, |
|
"text": "Zhang et al. (2017)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Two sentence-level classification tasks including CoLA (Warstadt et al., 2018) for evaluating linguistic acceptability and SST-2 (Socher et al., 2013) for sentiment classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 80, |
|
"text": "(Warstadt et al., 2018)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 152, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Three sentence-pair similarity tasks including MRPC (Dolan and Brockett, 2005), a binary paraphrasing task sentence pairs from news sources, STS-B (Cer et al., 2017 ), a graded similarity task for news headlines, and QQP, 6 a binary paraphrasing tasking between Quora question pairs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 166, |
|
"text": "(Cer et al., 2017", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Four natural language inference tasks including MNLI (Williams et al., 2018) , QNLI (Rajpurkar et al., 2016) , RTE (Dagan et al., 2005; Bar-Haim et al., 2006; Giampiccolo et al., 2007) , and WNLI (Levesque et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 78, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 86, |
|
"end": 110, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 137, |
|
"text": "RTE (Dagan et al., 2005;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 160, |
|
"text": "Bar-Haim et al., 2006;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 186, |
|
"text": "Giampiccolo et al., 2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 221, |
|
"text": "(Levesque et al., 2011)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Unlike question answering, coreference resolution, and relation extraction, these sentence-level tasks do not require explicit modeling of spanlevel semantics. However, they might still benefit from implicit span-based reasoning (e.g., the Prime Minister is the head of the government). Following previous work Radford et al., 2018 ), 7 we exclude WNLI from the results to enable a fair comparison. Although recent work Liu et al. (2019a) has applied several task-specific strategies to increase performance on the individual GLUE tasks, we follow BERT's single-task setting and only add a linear classifier on top of the [CLS] token for these classification tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 331, |
|
"text": "Radford et al., 2018", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks", |
|
"sec_num": "4.1" |
|
}, |
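For the single-task GLUE setting described above, a minimal sketch of the classification head (a linear classifier over the [CLS] representation); the dropout value is an assumption.

```python
import torch.nn as nn

class SentenceClassifier(nn.Module):
    """Linear classifier over the [CLS] token representation (single-task GLUE setup)."""

    def __init__(self, hidden_size: int, num_labels: int, dropout: float = 0.1):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.classifier = nn.Linear(hidden_size, num_labels)

    def forward(self, hidden_states):  # (batch, seq_len, hidden)
        cls = hidden_states[:, 0]       # [CLS] is the first position
        return self.classifier(self.dropout(cls))
```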
|
{ |
|
"text": "We reimplemented BERT's model and pretraining method in fairseq (Ott et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 82, |
|
"text": "(Ott et al., 2019)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We used the model configuration of BERT large as in and also pre-trained all our models on the same corpus: BooksCorpus and English Wikipedia using cased Wordpiece tokens. Compared with the original BERT implementation, the main differences in our implementation include: (a) We use different masks at each epoch while BERT samples 10 different masks for each sequence during data processing. (b) We remove all the short-sequence strategies used before (they sampled shorter sequences with a small probability 0.1; they also first pre-trained with smaller sequence length of 128 for 90% of the steps). Instead, we always take sequences of up to 512 tokens until it reaches a document boundary. We refer readers to Liu et al. (2019b) for further discussion on these modifications and their effects.", |
|
"cite_spans": [ |
|
{ |
|
"start": 714, |
|
"end": 732, |
|
"text": "Liu et al. (2019b)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As in BERT, the learning rate is warmed up over the first 10,000 steps to a peak value of 1e-4, and then linearly decayed. We retain \u03b2 hyperparameters (\u03b2 1 = 0.9, \u03b2 2 = 0.999) and a decoupled weight decay (Loshchilov and Hutter, 2019) of 0.1. We also keep a dropout of 0.1 on all layers and attention weights, and a GeLU activation function (Hendrycks and Gimpel, 2016) . We deviate from the optimization by running for 2.4M steps and using an epsilon of 1e-8 for AdamW (Kingma and Ba, 2015) , which converges to a better set of model parameters. Our implementation uses a batch size of 256 sequences with a maximum of 512 tokens. 8 For the SBO, we use 200 dimension position embeddings p 1 , p 2 , . . . to mark positions relative to the left boundary token. The pre-training was done on 32 Volta V100 GPUs and took 15 days to complete. Fine-tuning is implemented based on Hugging-Face's codebase (Wolf et al., 2019) and more details are given in Appendix B.", |
|
"cite_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 369, |
|
"text": "(Hendrycks and Gimpel, 2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 491, |
|
"text": "(Kingma and Ba, 2015)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 898, |
|
"end": 917, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF51" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "4.2" |
|
}, |
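The optimization settings above might be wired up as in the following sketch (not the fairseq configuration itself); for simplicity it applies weight decay to all parameters, whereas implementations typically exempt biases and LayerNorm weights.

```python
import torch

def build_optimizer(model, total_steps=2_400_000, warmup_steps=10_000, peak_lr=1e-4):
    """AdamW with beta=(0.9, 0.999), eps=1e-8, weight decay 0.1, and a
    10k-step linear warmup to the peak learning rate followed by linear decay."""
    optimizer = torch.optim.AdamW(
        model.parameters(), lr=peak_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.1
    )

    def lr_lambda(step):
        # Linear warmup, then linear decay to zero at total_steps.
        if step < warmup_steps:
            return step / max(1, warmup_steps)
        return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
    return optimizer, scheduler
```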
|
{ |
|
"text": "We compare SpanBERT to three baselines:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Google BERT The pre-trained models released by Devlin et al. (2019). 9 Our BERT Our reimplementation of BERT with improved data preprocessing and optimization (Section 4.2). Our BERT-1seq Our reimplementation of BERT trained on single full-length sequences without NSP (Section 3.3).", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 70, |
|
"text": "Devlin et al. (2019). 9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We compare SpanBERT to the baselines per task, and draw conclusions based on the overall trends.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Extractive Question Answering Table 1 shows the performance on both SQuAD 1.1 and 2.0. SpanBERT exceeds our BERT baseline by 2.0% and 2.8% F1, respectively (3.3% and 5.4% over Google BERT). In SQuAD 1.1, this result accounts for over 27% error reduction, reaching 3.4% F1 above human performance. Table 2 demonstrates that this trend goes beyond SQuAD, and is consistent in every MRQA dataset. On average, we see a 2.9% F1 improvement from our reimplementation of BERT. Although some gains are coming from single-sequence training (+1.1%), most of the improvement stems from span masking and the span boundary objective (+1.8%), with particularly large gains on TriviaQA (+3.2%) and HotpotQA (+2.7%). Table 3 shows the performance on the OntoNotes coreference resolution benchmark. Our BERT reimplementation improves the Google BERT model by 1.2% on the average F1 metric and single-sequence training brings another 0.5% gain. Finally, SpanBERT improves considerably on top of that, achieving a new state of the art of 79.6% F1 (previous best result is 73.0%). Table 4 shows the performance on TACRED. SpanBERT exceeds our reimplementation of BERT by 3.3% F1 and achieves close to the current state of the art (Soares et al., 2019)-Our model performs better than their BERT EM but is 0.7 point behind BERT EM + MTB, which used entity-linked text for additional pre-training. Most of this gain (+2.6%) stems from single-sequence training although the contribution of span masking and the span boundary objective is still a considerable 0.7%, resulting largely from higher recall.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 37, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 304, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 701, |
|
"end": 708, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1061, |
|
"end": 1068, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Per-Task Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "GLUE Table 5 shows the performance on GLUE. For most tasks, the different models appear to perform similarly. Moving to single-sequence training without the NSP objective substantially improves CoLA, and yields smaller (but considerable) improvements on MRPC and MNLI. The main gains from SpanBERT are in the SQuADbased QNLI dataset (+1.3%) and in RTE (+6.9%), the latter accounting for most of the rise in SpanBERT's GLUE average.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Relation Extraction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We compared our approach to three BERT baselines on 17 benchmarks, and found that SpanBERT outperforms BERT on almost every task. In 14 tasks, SpanBERT performed better than all baselines. In two tasks (MRPC and QQP), it performed on-par in terms of accuracy with single-sequence trained BERT, but still outperformed the other baselines. In one task (SST-2), Google's BERT baseline performed better than SpanBERT by 0.4% accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Trends", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "When considering the magnitude of the gains, it appears that SpanBERT is especially better at extractive question answering. In SQuAD 1.1, for example, we observe a solid gain of 2.0% F1 even though the baseline is already well above human performance. On MRQA, SpanBERT improves between 2.0% (Natural Questions) and 4.6% (TriviaQA) F1 on top of our BERT baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Trends", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Finally, we observe that single-sequence training works considerably better than bi-sequence Table 5 : Test set performance on GLUE tasks. MRPC: F1/accuracy, STS-B: Pearson/Spearmanr correlation, QQP: F1/accuracy, MNLI: matched/mistached accuracies, and accuracy for all the other tasks. WNLI (not shown) is always set to majority class (65.1% accuracy) and included in the average.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 100, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Trends", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "training with NSP with BERT's choice of sequence lengths for a wide variety of tasks. This is surprising because BERT's ablations showed gains from the NSP objective . However, the ablation studies still involved bisequence data processing (i.e., the pre-training stage only controlled for the NSP objective while still sampling two half-length sequences). We hypothesize that bi-sequence training, as it is implemented in BERT (see Section 2), impedes the model from learning longer-range features, and consequently hurts performance on many downstream tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Trends", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We compare our random span masking scheme with linguistically-informed masking schemes, and find that masking random spans is a competitive and often better approach. We then study the impact of the SBO, and contrast it with BERT's NSP objective. 10", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Studies", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Previous work (Sun et al., 2019) has shown improvements in downstream task performance by masking linguistically informed spans during pretraining for Chinese data. We compare our random span masking scheme with masking of linguistically informed spans. Specifically, we train the following five baseline models differing only in the way tokens are masked.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Masking Schemes", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We sample random Wordpiece tokens, as in the original BERT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subword Tokens", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We sample random words, and then mask all of the subword tokens in those words. The total number of masked subtokens is around 15%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Whole Words", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "10 To save time and resources, we use the checkpoints at 1.2M steps for all the ablation experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Whole Words", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Named Entities At 50% of the time, we sample from named entities in the text, and sample random whole words for the other 50%. The total number of masked subtokens is 15%. Specifically, we run spaCy's named entity recognizer (Honnibal and Montani, 2017) 11 on the corpus and select all the non-numerical named entity mentions as candidates.", |
|
"cite_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 253, |
|
"text": "(Honnibal and Montani, 2017)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Whole Words", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Noun Phrases Similar to Named Entities, we sample from noun phrases at 50% of the time. The noun phrases are extracted by running spaCy's constituency parser.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Whole Words", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We sample random spans from a geometric distribution, as in our SpanBERT (see Section 3.1). Table 6 shows how different pre-training masking schemes affect performance on the development set of a selection of tasks. All the models are evaluated on the development sets and are based on the default BERT setup of bi-sequence training with NSP; the results are not directly comparable to the main evaluation. With the exception of coreference resolution, masking random spans is preferable to other strategies. Although linguistic masking schemes (named entities and noun phrases) are often competitive with random spans, their performance is not consistent; for instance, masking noun phrases achieves parity with random spans on NewsQA, but underperforms on TriviaQA (\u22121.1% F1).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 99, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Geometric Spans", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "On coreference resolution, we see that masking random subword tokens is preferable to any form of span masking. Nevertheless, we shall see in the following experiment that combining random span masking with the span boundary objective can improve upon this result considerably.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Geometric Spans", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In Section 5, we saw that bi-sequence training with the NSP objective can hurt performance on 11 https://spacy.io/. downstream tasks, when compared with singlesequence training. We test whether this holds true for models pre-trained with span masking, and also evaluate the effect of replacing the NSP objective with the SBO. Table 7 confirms that single-sequence training typically improves performance. Adding SBO further improves performance, with a substantial gain on coreference resolution (+2.7% F1) over span masking alone. Unlike the NSP objective, SBO does not appear to have any adverse effects.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 333, |
|
"text": "Table 7", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Auxiliary Objectives", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Pre-trained contextualized word representations that can be trained from unlabeled text (Dai and Le, 2015; Melamud et al., 2016; Peters et al., 2018) have had immense impact on NLP lately, particularly as methods for initializing a large model before fine-tuning it for a specific task (Howard and Ruder, 2018; Radford et al., 2018; . Beyond differences in model hyperparameters and corpora, these methods mainly differ in their pre-training tasks and loss functions, with a considerable amount of contemporary literature proposing augmentations of BERT's MLM objective.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 106, |
|
"text": "(Dai and Le, 2015;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 128, |
|
"text": "Melamud et al., 2016;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 129, |
|
"end": 149, |
|
"text": "Peters et al., 2018)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 310, |
|
"text": "(Howard and Ruder, 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 332, |
|
"text": "Radford et al., 2018;", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "While previous and concurrent work has looked at masking or dropping (Song et al., 2019; Chan et al., 2019) multiple words from the input-particularly as pretraining for lan-guage generation tasks-SpanBERT pretrains span representations (Lee et al., 2016) , which are widely used for question answering, coreference resolution, and a variety of other tasks. ERNIE shows improvements on Chinese NLP tasks using phrase and named entity masking. MASS (Song et al., 2019) focuses on language generation tasks, and adopts the encoder-decoder framework to reconstruct a sentence fragment given the remaining part of the sentence. We attempt to more explicitly model spans using the SBO objective, and show that (geometrically distributed) random span masking works as well, and sometimes better than, masking linguisticallycoherent spans. We evaluate on English benchmarks for question answering, relation extraction, and coreference resolution in addition to GLUE.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 88, |
|
"text": "(Song et al., 2019;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 89, |
|
"end": 107, |
|
"text": "Chan et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 255, |
|
"text": "(Lee et al., 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 467, |
|
"text": "(Song et al., 2019)", |
|
"ref_id": "BIBREF43" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "A different ERNIE focuses on integrating structured knowledge bases with contextualized representations with an eye on knowledge-driven tasks like entity typing and relation classification. UNILM (Dong et al., 2019) uses multiple language modeling objectivesunidirectional (both left-to-right and right-to-left), bidirectional, and sequence-to-sequence predictionto aid generation tasks like summarization and question generation. XLM (Lample and Conneau, 2019) explores cross-lingual pre-training for multilingual tasks such as translation and cross-lingual classification. Kermit (Chan et al., 2019) , an insertion based approach, fills in missing tokens (instead of predicting masked ones) during pretraining; they show improvements on machine translation and zero-shot question answering.", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 215, |
|
"text": "(Dong et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 461, |
|
"text": "(Lample and Conneau, 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 582, |
|
"end": 601, |
|
"text": "(Chan et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Concurrent with our work, RoBERTa (Liu et al., 2019b ) presents a replication study of BERT pre-training that measures the impact of many key hyperparameters and training data size. Also concurrent, XLNet combines an autoregressive loss and the Transformer-XL architecture with a more than an eight-fold increase in data to achieve current stateof-the-art results on multiple benchmarks. XLNet also masks spans (of 1-5 tokens) during pretraining, but predicts them autoregressively. Our model focuses on incorporating span-based pretraining, and as a side effect, we present a stronger BERT baseline while controlling for the corpus, architecture, and the number of parameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 52, |
|
"text": "(Liu et al., 2019b", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Related to our SBO objective, pair2vec (Joshi et al., 2019a) encodes word-pair relations using a negative sampling-based multivariate objective during pre-training. Later, the word-pair representations are injected into the attention-layer of downstream tasks, and thus encode limited downstream context. Unlike pair2vec, our SBO objective yields ''pair'' (start and end tokens of spans) representations which more fully encode the context during both pre-training and finetuning, and are thus more appropriately viewed as span representations. Stern et al. (2018) focus on improving language generation speed using a block-wise parallel decoding scheme; they make predictions for multiple time steps in parallel and then back off to the longest prefix validated by a scoring model. Also related are sentence representation methods (Kiros et al., 2015; Logeswaran and Lee, 2018) , which focus on predicting surrounding contexts from sentence embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 60, |
|
"text": "(Joshi et al., 2019a)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 564, |
|
"text": "Stern et al. (2018)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 832, |
|
"end": 852, |
|
"text": "(Kiros et al., 2015;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 853, |
|
"end": 878, |
|
"text": "Logeswaran and Lee, 2018)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We presented a new method for span-based pretraining which extends BERT by (1) masking contiguous random spans, rather than random tokens, and (2) training the span boundary representations to predict the entire content of the masked span, without relying on the individual token representations within it. Together, our pretraining process yields models that outperform all BERT baselines on a variety of tasks, and reach substantially better performance on span selection tasks in particular.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We describe our pre-training procedure as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Pre-training Procedure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1. Divide the corpus into single contiguous blocks of up to 512 tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Pre-training Procedure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2. At each step of pre-training:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Pre-training Procedure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(a) Sample a batch of blocks uniformly at random. (b) Mask 15% of word pieces in each block in the batch using the span masking scheme (Section 3.1). (c) For each masked token", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Pre-training Procedure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "x i , opti- mize L(x i ) = L MLM (x i ) + L SBO (x i ) (Section 3.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Pre-training Procedure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We apply the following fine-tuning hyperparameters to all methods, including the baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Fine-tuning Hyperparameters", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Extractive Question Answering For all the question answering tasks, we use max seq length = 512 and a sliding window of size 128 if the lengths are longer than 512. We choose learning rates from {5e-6, 1e-5, 2e-5, 3e-5, 5e-5} and batch sizes from {16, 32} and fine-tune four epochs for all the datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Fine-tuning Hyperparameters", |
|
"sec_num": null |
|
}, |
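The sweep described above amounts to a small grid search; a sketch follows, where `fine_tune_and_evaluate` is a hypothetical helper that fine-tunes with the given settings and returns a development-set score.

```python
from itertools import product

LEARNING_RATES = [5e-6, 1e-5, 2e-5, 3e-5, 5e-5]
BATCH_SIZES = [16, 32]

def sweep(fine_tune_and_evaluate):
    """Try every (lr, batch_size) pair for 4 epochs and keep the best dev score.

    fine_tune_and_evaluate(lr, batch_size, epochs) is a hypothetical helper
    that fine-tunes the model and returns a development-set metric (e.g., F1).
    """
    best = None
    for lr, bs in product(LEARNING_RATES, BATCH_SIZES):
        score = fine_tune_and_evaluate(lr=lr, batch_size=bs, epochs=4)
        if best is None or score > best[0]:
            best = (score, lr, bs)
    return best
```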
|
{ |
|
"text": "We divide the documents into multiple chunks of lengths up to max seq length and encode each chunk independently. We choose max seq length from {128, 256, 384, 512}, BERT learning rates from {1e-5, 2e-5}, task-specific learning rates from {1e-4, 2e-4, 3e-4}, and fine-tune 20 epochs for all the datasets. We use batch size = 1 (one document) for all the experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "TACRED/GLUE We use max seq length = 128 and choose learning rates from {5e-6, 1e-5, 2e-5, 3e-5, 5e-5} and batch sizes from {16, 32} and fine-tuning 10 epochs for all the datasets. The only exception is CoLA, where we used four epochs (following , because 10 epochs lead to severe overfitting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coreference Resolution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the modified MRQA version of these datasets. See more details in Section 4.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We experimented with p = {0.1, 0.2, 0.4} and found 0.2 to perform the best.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/mrqa/MRQA-Shared-Task-2019. MRQA changed the original datasets to unify them into the same format, e.g., all the contexts are truncated to a maximum of 800 tokens and only answerable questions are kept.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The length was chosen from {128, 256, 384, 512}. See more details in Appendix B.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs.7 Previous work has excluded WNLI on account of construction issues outlined on the GLUE websitehttps:// gluebenchmark.com/faq.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "On the average, this is approximately 390 sequences, because some documents have fewer than 512 tokens. 9 https://github.com/google-research/bert.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Pranav Rajpurkar and Robin Jia for patiently helping us evaluate SpanBERT on SQuAD. We thank the anonymous reviewers, the action editor, and our colleagues at Facebook AI Research and the University of Washington for their insightful feedback that helped improve the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Layer normalization", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [ |
|
"Lei" |
|
], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [ |
|
"Ryan" |
|
], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.06450" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E. Hinton. 2016. Layer normalization. arXiv pre- print arXiv:1607.06450.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The second PASCAL recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Bar-Haim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Ferro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Idan", |
|
"middle": [], |
|
"last": "Szpektor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Second PASCAL Challenges Workshop on Recognising Textual Entailment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roy Bar-Haim, Ido Dagan, Bill Dolan, Lisa Ferro, Danilo Giampiccolo, Bernardo Magnini, and Idan Szpektor. 2006. The second PASCAL recognising textual entailment challenge. In Proceedings of the Second PASCAL Challenges Workshop on Recognising Textual Entailment, pages 6-4.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Semantic textual similarity multilingual and crosslingual focused evaluation", |
|
"authors": [], |
|
"year": null, |
|
"venue": "International Workshop on Semantic Evaluation (SemEval)", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Semeval-2017 task 1: Semantic textual similar- ity multilingual and crosslingual focused eval- uation. In International Workshop on Semantic Evaluation (SemEval), pages 1-14. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "KERMIT: Generative insertion-based modeling for sequences", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Kitaev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Guu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Stern", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.01604" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Chan, Nikita Kitaev, Kelvin Guu, Mitchell Stern, and Jakob Uszkoreit. 2019. KERMIT: Generative insertion-based modeling for sequences. arXiv preprint arXiv:1906.01604.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The PASCAL recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Glickman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Machine Learning Challenges Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--190", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2005. The PASCAL recognising tex- tual entailment challenge. In Machine Learning Challenges Workshop, pages 177-190. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Semisupervised sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Andrew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3079--3087", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew M. Dai and Quoc V. Le. 2015. Semi- supervised sequence learning. In Advances in Neural Information Processing Systems (NIPS), pages 3079-3087.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Transformer-XL: Attentive language models beyond a fixedlength context", |
|
"authors": [ |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zihang Dai, Zhilin Yang, Yiming Yang, William W. Cohen, Jaime Carbonell, Quoc V. Le, and Ruslan Salakhutdinov. 2019. Transformer-XL: Attentive language models beyond a fixed- length context. In Association for Computa- tional Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In North American Association for Computational Linguistics (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Automatically constructing a corpus of sentential paraphrases", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "William", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the International Workshop on Paraphrasing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William B. Dolan and Chris Brockett. 2005. Automatically constructing a corpus of sen- tential paraphrases. In Proceedings of the Inter- national Workshop on Paraphrasing.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Unified language model pre-training for natural language understanding and generation", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhui", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hsiao-Wuen", |
|
"middle": [], |
|
"last": "Hon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pre-training for natural lan- guage understanding and generation. In Ad- vances in Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "SearchQA: A new Q&A dataset augmented with context from a search engine", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Dunn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Levent", |
|
"middle": [], |
|
"last": "Sagun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Higgins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"Ugur" |
|
], |
|
"last": "Guney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volkan", |
|
"middle": [], |
|
"last": "Cirik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.05179" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Dunn, Levent Sagun, Mike Higgins, V. Ugur Guney, Volkan Cirik, and Kyunghyun Cho. 2017. SearchQA: A new Q&A dataset augmented with context from a search engine. arXiv preprint arXiv:1704.05179.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "MRQA 2019 shared task: Evaluating generalization in reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Fisch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Talmor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minjoon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of 2nd Machine Reading for Reading Comprehension (MRQA) Workshop at EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Fisch, Alon Talmor, Robin Jia, Minjoon Seo, Eunsol Choi, and Danqi Chen. 2019. MRQA 2019 shared task: Evaluating general- ization in reading comprehension. In Proceed- ings of 2nd Machine Reading for Reading Comprehension (MRQA) Workshop at EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "The third PASCAL recognizing textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. 2007. The third PASCAL recognizing textual entailment challenge. In Pro- ceedings of the ACL-PASCAL Workshop on Tex- tual Entailment and Paraphrasing, pages 1-9.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Jointly predicting predicates and arguments in neural semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "364--369", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luheng He, Kenton Lee, Omer Levy, and Luke Zettlemoyer. 2018. Jointly predicting predicates and arguments in neural semantic role labeling. In Association for Computational Linguistics (ACL), pages 364-369.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Gaussian error linear units (gelus)", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Hendrycks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.08415" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Hendrycks and Kevin Gimpel. 2016. Gaussian error linear units (gelus). arXiv pre- print arXiv:1606.08415.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Montani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Ines Montani. 2017. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing. To appear.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Universal language model fine-tuning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1801.06146" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Uni- versal language model fine-tuning for text clas- sification. arXiv preprint arXiv:1801.06146.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "pair2vec: Compositional word-pair embeddings for cross-sentence inference", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3597--3608", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Eunsol Choi, Omer Levy, Daniel Weld, and Luke Zettlemoyer. 2019a. pair2vec: Compositional word-pair embeddings for cross-sentence inference. In North American Association for Computational Linguistics (NAACL), pages 3597-3608.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1601--1611", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. 2017. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension. In Association for Com- putational Linguistics (ACL), pages 1601-1611.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "BERT for coreference resolution: Baselines and analysis", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Omer Levy, Daniel S. Weld, Luke Zettlemoyer, and Omer Levy. 2019b. BERT for coreference resolution: Baselines and analysis. In Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Inter- national Conference on Learning Representa- tions (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Skip-thought vectors", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Kiros, Yukun Zhu, Ruslan R. Salakhutdinov, Richard S. Zemel, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. 2015. Skip-thought vectors. In Advances in Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Natural questions: A benchmark for question answering research. Transactions of the Association of Computational Linguistics (TACL)", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennimaria", |
|
"middle": [], |
|
"last": "Palomaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivia", |
|
"middle": [], |
|
"last": "Redfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Alberti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danielle", |
|
"middle": [], |
|
"last": "Epstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Kelcey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Matthew Kelcey, Jacob Devlin, Kenton Lee, Kristina N. Toutanova, Llion Jones, Ming- Wei Chang, Andrew Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natural questions: A benchmark for question answering research. Transactions of the Association of Computational Linguistics (TACL).", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Cross-lingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross-lingual language model pretraining. Advances in Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "End-to-end neural coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--197", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Luheng He, Mike Lewis, and Luke Zettlemoyer. 2017. End-to-end neural coreference resolution. In Empirical Methods in Natural Language Processing (EMNLP), pages 188-197.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Higher-order coreference resolution with coarse-to-fine inference", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "687--692", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Luheng He, and Luke Zettlemoyer. 2018. Higher-order coreference resolution with coarse-to-fine inference. In North American Association for Computational Linguistics (NAACL), pages 687-692.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Learning recurrent span representations for extractive question answering", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shimi", |
|
"middle": [], |
|
"last": "Salant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.01436" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Shimi Salant, Tom Kwiatkowski, Ankur Parikh, Dipanjan Das, and Jonathan Berant. 2016. Learning recurrent span repre- sentations for extractive question answering. arXiv preprint arXiv:1611.01436.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "The Winograd schema challenge", |
|
"authors": [ |
|
{ |
|
"first": "Hector", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Levesque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ernest", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leora", |
|
"middle": [], |
|
"last": "Morgenstern", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "AAAI Spring Symposium: Logical Formalizations of Commonsense Reasoning", |
|
"volume": "46", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hector J. Levesque, Ernest Davis, and Leora Morgenstern. 2011. The Winograd schema challenge. In AAAI Spring Symposium: Logical Formalizations of Commonsense Reasoning, volume 46, page 47.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Multi-task deep neural networks for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weizhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodong Liu, Pengcheng He, Weizhu Chen, and Jianfeng Gao. 2019a. Multi-task deep neural networks for natural language understanding. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. As- sociation for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "RoBERTa: A robustly optimized BERT pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019b. RoBERTa: A robustly opti- mized BERT pretraining approach. arxiv pre- print arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "An efficient framework for learning sentence representations", |
|
"authors": [ |
|
{ |
|
"first": "Lajanugen", |
|
"middle": [], |
|
"last": "Logeswaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Honglak", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1803.02893" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lajanugen Logeswaran and Honglak Lee. 2018. An efficient framework for learning sentence representations. arxiv preprint arXiv:1803.02893.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Decoupled weight decay regularization", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Loshchilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Loshchilov and Frank Hutter. 2019. Decou- pled weight decay regularization. In Interna- tional Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "context2vec: Learning generic context embedding with bidirectional LSTM", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Melamud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Goldberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "51--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Melamud, Jacob Goldberger, and Ido Dagan. 2016. context2vec: Learning generic context embedding with bidirectional LSTM. In Com- putational Natural Language Learning (CoNLL), pages 51-61.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "fairseq: A fast, extensible toolkit for sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, exten- sible toolkit for sequence modeling. In North American Association for Computational Lin- guistics (NAACL), pages 48-53.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextual- ized word representations. In North American Association for Computational Linguistics (NAACL), pages 2227-2237.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "CoNLL-2012 shared task: Modeling multilingual unrestricted coreference in ontonotes", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sameer Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Moschitti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuchen", |
|
"middle": [], |
|
"last": "Uryupina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Joint Conference on EMNLP and CoNLL-Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. CoNLL-2012 shared task: Modeling multi- lingual unrestricted coreference in ontonotes. In Joint Conference on EMNLP and CoNLL- Shared Task, pages 1-40.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Using the output embedding to improve language models", |
|
"authors": [ |
|
{ |
|
"first": "Ofir", |
|
"middle": [], |
|
"last": "Press", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lior", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "157--163", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ofir Press and Lior Wolf. 2017. Using the out- put embedding to improve language models. In Proceedings of the 15th Conference of the European Chapter of the Association for Com- putational Linguistics: Volume 2, Short Papers, pages 157-163. Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Improving language understanding with unsupervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Time Salimans, and Ilya Sutskever. 2018. Improving language un- derstanding with unsupervised learning, OpenAI.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Know what you don't know: Unanswerable questions for SQuAD", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "784--789", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswer- able questions for SQuAD. In Association for Computational Linguistics (ACL), pages 784-789.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "SQuAD: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2383--2392", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Empirical Methods in Natural Language Processing (EMNLP), pages 2383-2392.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Matching the blanks: Distributional similarity for relation learning", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Livio Baldini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [ |
|
"Arthur" |
|
], |
|
"last": "Soares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Fitzgerald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2895--2905", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Livio Baldini Soares, Nicholas Arthur FitzGerald, Jeffrey Ling, and Tom Kwiatkowski. 2019. Matching the blanks: Distributional similarity for relation learning. In Association for Compu- tational Linguistics (ACL), pages 2895-2905.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Perelygin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1631--1642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Empirical Methods in Natural Language Processing (EMNLP), pages 1631-1642.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "MASS: Masked sequence to sequence pre-training for language generation", |
|
"authors": [ |
|
{ |
|
"first": "Kaitao", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5926--5936", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie-Yan Liu. 2019. MASS: Masked se- quence to sequence pre-training for language generation. In International Conference on Machine Learning (ICML), pages 5926-5936.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Blockwise parallel decoding for deep autoregressive models", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Stern", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell Stern, Noam Shazeer, and Jakob Uszkoreit. 2018. Blockwise parallel decod- ing for deep autoregressive models. In Advances in Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "ERNIE: Enhanced representation through knowledge integration", |
|
"authors": [ |
|
{ |
|
"first": "Shuohuan", |
|
"middle": [], |
|
"last": "Yu Stephanie Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuyi", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinlun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danxiang", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Hao Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.09223" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Stephanie Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xinlun Tian, Danxiang Zhu, Hao Tian, and Hua Wu. 2019. ERNIE: Enhanced representation through knowledge integration. arXiv preprint arXiv:1904.09223.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "NewsQA: A machine comprehension dataset", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Trischler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingdi", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Bachman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaheer", |
|
"middle": [], |
|
"last": "Suleman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2nd Workshop on Representation Learning for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "191--200", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Trischler, Tong Wang, Xingdi Yuan, Justin Harris, Alessandro Sordoni, Philip Bachman, and Kaheer Suleman. 2017. NewsQA: A ma- chine comprehension dataset. In 2nd Work- shop on Representation Learning for NLP, pages 191-200.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amapreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amapreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and anal- ysis platform for natural language understand- ing. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Neural network acceptability judgments", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Warstadt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.12471" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Warstadt, Amanpreet Singh, and Samuel R. Bowman. 2018. Neural network acceptability judgments. arXiv preprint arXiv:1805.12471.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "North American Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sentence understanding through in- ference. In North American Association for Com- putational Linguistics (NAACL),pages1112-1122.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "HuggingFace's Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R'emi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.03771" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, R'emi Louf, Morgan Funtowicz, and Jamie Brew. 2019. HuggingFace's Transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "XLNet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. In Advances in Neural Information Processing Systems (NeurIPS).", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "HotpotQA: A dataset for diverse, explainable multi-hop question answering", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saizheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2369--2380", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William Cohen, Ruslan Salakhutdinov, and Christopher D. Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answering. In Empirical Methods in Natural Language Processing (EMNLP), pages 2369-2380.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Position-aware attention and supervised data improve slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuhao Zhang, Victor Zhong, Danqi Chen, Gabor Angeli, and Christopher D. Manning. 2017. Position-aware attention and supervised data improve slot filling. In Empirical Methods in Natural Language Processing (EMNLP), pages 35-45.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "ERNIE: Enhanced language representation with informative entities", |
|
"authors": [ |
|
{ |
|
"first": "Zhengyan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1441--1451", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. 2019. ERNIE: Enhanced language representation with informative entities. In Association for Compu- tational Linguistics (ACL), pages 1441-1451.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "and replace the subject and object entities by their NER tags such as ''[CLS] [SUBJ-PER] was born in [OBJ-LOC] , Michigan, . . . '', and finally add a linear classifier on top of the [CLS] token to predict the relation type." |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td>: Test results on SQuAD 1.1 and SQuAD</td></tr><tr><td>2.0.</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"text": "" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table><tr><td/><td>p</td><td>R</td><td>F1</td></tr><tr><td colspan=\"2\">BERT EM (Soares et al., 2019) \u2212</td><td colspan=\"2\">\u2212 70.1</td></tr><tr><td>BERT EM +MTB *</td><td>\u2212</td><td colspan=\"2\">\u2212 71.5</td></tr><tr><td>Google BERT</td><td colspan=\"3\">69.1 63.9 66.4</td></tr><tr><td>Our BERT</td><td colspan=\"3\">67.8 67.2 67.5</td></tr><tr><td>Our BERT-1seq</td><td colspan=\"3\">72.4 67.9 70.1</td></tr><tr><td>SpanBERT</td><td colspan=\"3\">70.8 70.9 70.8</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Performance on the OntoNotes coreference resolution benchmark. The main evaluation is the average F1 of three metrics: MUC, B 3 , and CEAF \u03c6 4 on the test set." |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table><tr><td>incor-</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Test performance on the TACRED relation extraction benchmark. BERT large and BERT EM +MTB from Soares et al. (2019) are the current state-of-the-art." |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"content": "<table><tr><td/><td colspan=\"7\">SQuAD 2.0 NewsQA TriviaQA Coref MNLI-m QNLI GLUE (Avg)</td></tr><tr><td>Span Masking (2seq) + NSP</td><td>85.4</td><td>73.0</td><td>78.8</td><td>76.4</td><td>87.0</td><td>93.3</td><td>83.4</td></tr><tr><td>Span Masking (1seq)</td><td>86.7</td><td>73.4</td><td>80.0</td><td>76.3</td><td>87.3</td><td>93.8</td><td>83.8</td></tr><tr><td colspan=\"2\">Span Masking (1seq) + SBO 86.8</td><td>74.1</td><td>80.3</td><td>79.0</td><td>87.6</td><td>93.9</td><td>84.0</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"text": "The effect of replacing BERT's original masking scheme (Subword Tokens) with different masking schemes. Results are F1 scores for QA tasks and accuracy for MNLI and QNLI on the development sets. All the models are based on bi-sequence training with NSP." |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"text": "The effects of different auxiliary objectives, given MLM over random spans as the primary objective." |
|
} |
|
} |
|
} |
|
} |