|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:31:30.638196Z" |
|
}, |
|
"title": "Team JARS: DialDoc Subtask 1 -Improved Knowledge Identification with Supervised Out-of-Domain Pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Sopan", |
|
"middle": [], |
|
"last": "Khosla", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Language Technologies Institute Carnegie Mellon University", |
|
"location": { |
|
"addrLine": "5000 Forbes Avenue Pittsburgh", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Lovelace", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Language Technologies Institute Carnegie Mellon University", |
|
"location": { |
|
"addrLine": "5000 Forbes Avenue Pittsburgh", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ritam", |
|
"middle": [], |
|
"last": "Dutt", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Language Technologies Institute Carnegie Mellon University", |
|
"location": { |
|
"addrLine": "5000 Forbes Avenue Pittsburgh", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Adithya", |
|
"middle": [], |
|
"last": "Pratapa", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Language Technologies Institute Carnegie Mellon University", |
|
"location": { |
|
"addrLine": "5000 Forbes Avenue Pittsburgh", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we discuss our submission for DialDoc subtask 1. The subtask requires systems to extract knowledge from FAQ-type documents vital to reply to a user's query in a conversational setting. We experiment with pretraining a BERT-based question-answering model on different QA datasets from MRQA, as well as conversational QA datasets like CoQA and QuAC. Our results show that models pretrained on CoQA and QuAC perform better than their counterparts that are pretrained on MRQA datasets. Our results also indicate that adding more pretraining data does not necessarily result in improved performance. Our final model, which is an ensemble of AlBERT-XL pretrained on CoQA and QuAC independently, with the chosen answer having the highest average probability score, achieves an F1-Score of 70.9% on the official test-set.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we discuss our submission for DialDoc subtask 1. The subtask requires systems to extract knowledge from FAQ-type documents vital to reply to a user's query in a conversational setting. We experiment with pretraining a BERT-based question-answering model on different QA datasets from MRQA, as well as conversational QA datasets like CoQA and QuAC. Our results show that models pretrained on CoQA and QuAC perform better than their counterparts that are pretrained on MRQA datasets. Our results also indicate that adding more pretraining data does not necessarily result in improved performance. Our final model, which is an ensemble of AlBERT-XL pretrained on CoQA and QuAC independently, with the chosen answer having the highest average probability score, achieves an F1-Score of 70.9% on the official test-set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Question Answering (QA) involves constructing an answer for a given question in either an extractive or an abstractive manner. QA systems are central to other Natural Language Processing (NLP) applications like search engines, and dialogue. Recently, QA based solutions have also been proposed to evaluate factuality (Wang et al., 2020) and faithfulness (Durmus et al., 2020) of abstractive summarization systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 336, |
|
"text": "(Wang et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 375, |
|
"text": "(Durmus et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition to popular QA benchmarks like SQuAD (Rajpurkar et al., 2016) , and MRQA-2019 (Fisch et al., 2019) , we have seen QA challenges that require reasoning over human dialogue. Some notable examples being QuAC (Choi et al., 2018) and CoQA (Reddy et al., 2019) . These datasets require the model to attend to the entire dialogue context in the process of retrieving an answer. In this work, we are interesting in building a QA system to help with human dialogue. Feng et al. (2020) introduced a new dataset of goal-oriented dialogues (Doc2Dial) that are grounded in the associated documents. Each sample in the dataset consists of an information-seeking conversation between a user and an agent where agent's responses are grounded in FAQ-like webpages. DialDoc shared task derives its training data from the Doc2Dial dataset and proposes two subtasks which require the participants to (1) identify the grounding knowledge in form of document span for the next agent turn; and (2) generate the next agent response in natural language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 72, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 89, |
|
"end": 109, |
|
"text": "(Fisch et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 235, |
|
"text": "(Choi et al., 2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 265, |
|
"text": "(Reddy et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 468, |
|
"end": 486, |
|
"text": "Feng et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we describe our solution to the subtask 1. This subtask is formulated as a span selection problem. Therefore, we leverage a transformerbased extractive question-answering model Lan et al., 2019) to extract the relevant spans from the document. We pretrain our model on different QA datasets like SQuAD, different subsets of MRQA-2019 training set, and conversational QA datasets like CoQA and QuAC. We find that models pretrained on out-of-domain QA datasets substantially outperform the baseline. Our experiments suggest that conversational QA datasets are more useful than MRQA-2019 data or its subsets. In the following sections, we first present an overview of the DialDoc shared task ( \u00a72), followed by our system description ( \u00a73) and a detailed account of our experimental results, and ablation studies ( \u00a74, \u00a75).", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 209, |
|
"text": "Lan et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Dataset used in the DialDoc shared-task is derived from Doc2Dial dataset (Feng et al., 2020) , a new dataset with goal-oriented document-grounded dialogue. It includes a set of documents and conversations between a user and an agent grounded in the associated document. The authors provide annotations for dialogue acts for each utterance in the dialogue flow, along with the span in the document that acts as the reference of it.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 92, |
|
"text": "(Feng et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DialDoc Shared Task Dataset", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The dataset shared during the shared task was divided into train/validation/testdev/test splits. Train and validation splits were provided to the participants to facilitate model development. During phase 1, the models were evaluated on testdev whereas, the final ranking was done on the performance on the test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DialDoc Shared Task Dataset", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Pre-processing Using the pre-processing scripts provided by the task organizers, we converted the Doc2Dial dataset into SQuAD v2.0 format with questions containing the latest user utterance as well as all previous turns in the conversation. This is in line with previous work from (Feng et al., 2020) which showed that including the entire conversational history performs better than just considering the current user utterance. Dialogue context is concatenated with the latest user utterance in the reverse time order.", |
|
"cite_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 300, |
|
"text": "(Feng et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DialDoc Shared Task Dataset", |
|
"sec_num": "2" |
|
}, |
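
{

"text": "The following is a minimal illustrative sketch (our own, not taken from the official pre-processing scripts) of how a dialogue can be flattened into a SQuAD-style question by placing the latest user utterance first and appending the remaining turns in reverse time order; the turn format and role labels are assumptions.\n\ndef build_question(turns):\n    # turns: list of {'role', 'utterance'} dicts ordered oldest-to-newest.\n    # Reversing the list puts the latest user utterance first, followed by\n    # the earlier turns in reverse time order, as described above.\n    history = [t['role'] + ': ' + t['utterance'] for t in reversed(turns)]\n    return ' '.join(history)\n\nturns = [\n    {'role': 'user', 'utterance': 'How do I renew my license?'},\n    {'role': 'agent', 'utterance': 'Do you hold a commercial license?'},\n    {'role': 'user', 'utterance': 'No, a regular one.'},\n]\nprint(build_question(turns))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "DialDoc Shared Task Dataset",

"sec_num": "2"

},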
|
{ |
|
"text": "The output of this pre-processing step consisted of 20431 training, 3972 validation, 727 testdev, and 2824 test instances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DialDoc Shared Task Dataset", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As discussed earlier, subtask 1 of DialDoc shared task is formulated as a span selection problem. Therefore, in order to learn to predict the correct span, we use an extractive question-answering setup.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We pass the pre-processed training data through a QA model that leverages a transformer encoder to contextually represent the question (dialogue history) along with the context (document). Since the grounding document is often longer than the maximum input sequence length for transformers, we follow (Feng et al., 2020) and truncate the documents in sliding windows with a stride. The document trunk and the dialogue history are passed through the transformer encoder to create contextual representations for each token in the input. To extract the beginning and the ending positions of the answer span within the document, the encoded embeddings are sent to a linear layer to output two logits that correspond to the probability of the position being the start and end position of the answer span. The training loss is computed using the Cross-Entropy loss function. We use the huggingface transformers toolkit in all of our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 301, |
|
"end": 320, |
|
"text": "(Feng et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question-Answering Model", |
|
"sec_num": "3.1" |
|
}, |
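
{

"text": "A minimal sketch of the span-extraction head described above, assuming the HuggingFace transformers and PyTorch APIs; this is our paraphrase, not the authors' training code. A transformer encoder produces token representations, a linear layer maps each token to a start and an end logit, and the loss is the cross-entropy over the gold start and end positions.\n\nfrom torch import nn\nfrom transformers import AutoModel\n\nclass SpanExtractor(nn.Module):\n    def __init__(self, model_name='bert-base-uncased'):\n        super().__init__()\n        self.encoder = AutoModel.from_pretrained(model_name)\n        # One linear layer produces two logits per token: start and end.\n        self.qa_outputs = nn.Linear(self.encoder.config.hidden_size, 2)\n\n    def forward(self, input_ids, attention_mask, start_positions=None, end_positions=None):\n        hidden = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state\n        start_logits, end_logits = self.qa_outputs(hidden).split(1, dim=-1)\n        start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)\n        if start_positions is not None:\n            loss_fn = nn.CrossEntropyLoss()\n            # Average the cross-entropy losses over the start and end positions.\n            loss = (loss_fn(start_logits, start_positions) + loss_fn(end_logits, end_positions)) / 2\n            return loss, start_logits, end_logits\n        return start_logits, end_logits",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Question-Answering Model",

"sec_num": "3.1"

},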
|
{ |
|
"text": "Recent work (Gururangan et al., 2020) has shown that multi-phase domain adaptive pretraining of transformer-based encoders on related datasets (and tasks) benefits the overall performance of the model on the downstream task. Motivated by this, we experimented with further pretraining the QA model on different out-of-domain QA datasets to gauge its benefits on Doc2Dial (Table 1) ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 371, |
|
"end": 380, |
|
"text": "(Table 1)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pretraining", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In this section, we discuss our experimental setup in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Firstly, we briefly describe the different datasets used for the continual pretraining of our transformer-based QA models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretraining Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Shared task (Fisch et al., 2019) focused on evaluating the generalizability of QA systems. They developed a training set that combined examples from 6 different QA datasets and developed evaluation splits using 12 other QA datasets. We explored the effectiveness of pretraining on the entire MRQA training set as well on each of the 6 training datasets: SQuAD (Rajpurkar et al., 2016) , NewsQA (Trischler et al., 2017) , Nat-uralQuestions (Kwiatkowski et al., 2019) , Hot-potQA (Yang et al., 2018) , SearchQA (Dunn et al., 2017) , and TriviaQA (Joshi et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 32, |
|
"text": "(Fisch et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 384, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 418, |
|
"text": "(Trischler et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 465, |
|
"text": "(Kwiatkowski et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 497, |
|
"text": "(Yang et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 509, |
|
"end": 528, |
|
"text": "(Dunn et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 564, |
|
"text": "(Joshi et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "MRQA-19", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Conversational QA datasets. We also experiment with pretraining on two conversational QA datasets: QuAC ( CoQA (Reddy et al., 2019 ). 2 For both datasets, we filter out samples which do not adhere to SQuADlike extractive QA setup (e.g. yes/no questions) or have a context length of more than 5000 characters. Table 1 presents the size of the different pretraining datasets after the removal of non-extractive QA samples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 130, |
|
"text": "(Reddy et al., 2019", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 309, |
|
"end": 316, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "MRQA-19", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The shared-task relies on Exact Match (EM) and F1 metrics to evaluate the systems on subtask 1. To compute these scores, we use the metrics for SQuAD from huggingface. 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.2" |
|
}, |
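
{

"text": "As an illustration (not from the paper), the HuggingFace SQuAD metric referenced in footnote 3 can be invoked roughly as follows; the ids and strings are made up.\n\nfrom datasets import load_metric\n\nmetric = load_metric('squad')\npredictions = [{'id': 'q1', 'prediction_text': 'renew your license online'}]\nreferences = [{'id': 'q1', 'answers': {'text': ['renew your license online or by mail'], 'answer_start': [0]}}]\n# Returns a dict with 'exact_match' and 'f1' percentages.\nprint(metric.compute(predictions=predictions, references=references))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Metrics",

"sec_num": "4.2"

},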
|
{ |
|
"text": "We use default parameters set by the subtask baseline provided by the authors. 4 However, we reduce the training per-device batch-size to 2 to accommodate the large models on an Nvidia Geforce GTX 1080 Ti 12GB GPU. We stop the continual out-ofdomain supervised pretraining after 2 epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hyperparameters", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We now present the results for different experimental setups we tried for DialDoc subtask 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our first set of results portray the differential benefits of different out-of-domain QA datasets when used to pretrain the transformer encoder. Experiments with bert-base-uncased on the validation set (Table 2) portray that pretraining on different QA datasets is indeed beneficial. Datasets like SQuAD, NewsQA, and NaturalQuestions are more useful than SearchQA, and Trivi-aQA. However, pretraining on complete MRQA-2019 training set does not outperform the individual datasets suggesting that merely introducing more pretraining data might not result in improved performance. Furthermore, conversational QA datasets like CoQA and QuAC, which are more similar in their setup to DialDoc, perform substantially better than any of the other MRQA-2019 training datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 211, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pretraining on Different QA Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We observe similar trends with larger transformers (Table 3) . Models pretrained on QuAC or CoQA outperform those pretrained on SQuAD. However, combining CoQA and QuAC during pretraining does not seem to help with the performance on validation or testdev split.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 60, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pretraining on Different QA Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Analyzing Different Transformer Variants Table 3 also contains the results for experiments where albert-xl is used to encode the questioncontext pair. We find that albert-xl-based models outperform their bert counterparts on validation set. However, they do not generalize well to the Testdev set, which contains about 30% of the test instances but is much smaller than validation set in size (727 samples in testdev vs 3972 in validation set).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretraining on Different QA Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We only submitted our best performing models on the official test set due to a constraint on the number of submissions. Contrary to the trends for testdev phase, albert-xl models trained on conversational QA datasets perform the best. albert-xl + QuAC is the best-performing single model according to the EM metric (EM = 52.60), whereas albert-xl + CoQA performs the best on F1 metric (F 1 = 69.48) on the test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on test set", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We perform ensembling over the outputs of the model variants to obtain a single unified ranked list. For a given question Q, we produce 20 candidate spans, along with a corresponding probability score ps. We compute rank-scores rs for the answer-spans at rank r as rs = 1 log 2 (r+1) . We then aggregate the information of the answer spans for the model variants using the following techniques. Frequent:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We chose the answer span which was the most frequent across the model variants. Rank Score :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We chose the answer span which was the highest average rank score. Probability Score:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We chose the answer span which was the highest average probability score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling", |
|
"sec_num": "5.3" |
|
}, |
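
{

"text": "A compact sketch of the three aggregation strategies described above; the candidate format, variable names, and the averaging convention (dividing by the number of model variants) are our assumptions.\n\nimport math\nfrom collections import defaultdict\n\ndef ensemble(candidate_lists, strategy='probability'):\n    # candidate_lists: one ranked list per model variant; each entry is (span_text, probability).\n    n_models = len(candidate_lists)\n    scores, counts = defaultdict(float), defaultdict(int)\n    for candidates in candidate_lists:\n        for rank, (span, prob) in enumerate(candidates, start=1):\n            counts[span] += 1\n            # Rank score rs = 1 / log2(r + 1); the probability score is ps itself.\n            scores[span] += 1.0 / math.log2(rank + 1) if strategy == 'rank' else prob\n    if strategy == 'frequent':\n        return max(counts, key=counts.get)\n    return max(scores, key=lambda s: scores[s] / n_models)\n\nlists = [[('span A', 0.6), ('span B', 0.3)], [('span B', 0.7), ('span A', 0.2)]]\nprint(ensemble(lists))  # 'span B' has the highest average probability score.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Ensembling",

"sec_num": "5.3"

},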
|
{ |
|
"text": "We observe empirically that ensembling using the probability score performs the best and hence we report the results of ensembling using the probability score (E) in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 173, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ensembling", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We observe the highest gains after ensembling the outputs of all the 5 model variants on the validation test and test-dev set. However, the best performance on the test set was achieved by ensembling over the albert-xl models pre-trained independently on CoQA and QuAC (EM = 53.5, F 1 = 70.9). This was the final submission for our team.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We investigate the disparate impact of pretraining on different MRQA-19 datasets on the Doc2Dial shared task. Specifically, we explored factors such as answer length, relative position of the answer in the context, question length, and context length in Table 4 . We observe that the SQuAD, NewsQA, and NaturalQuestions (NQ) has compartaively longer answers than the other datasets. However, we do not observe a noticeable difference in terms of question length, context length or relative position of the answer in the context, with respect to the other datasets. We also use the dataset of Li and Roth (2002) to train a BERT classifier to predict answer type of a question with 97% accuracy. The coarse-answer types are DESC (Description), NUM (Numerical), ENT (Entity), HUM (Person), LOC (Location) and ABBR (Abbreviation). We use the classifier to gauge the distribution of answer types on MRQA datasets and Doc2Dial. We observe from Figure 2 that a majority of questions in Doc2Dial require a descriptive answer. These DESC type questions are more prevelant in SQuAD, NewsQA, and NQ, which might explain their efficacy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 592, |
|
"end": 610, |
|
"text": "Li and Roth (2002)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 261, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 938, |
|
"end": 947, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Informed Data Selection", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "To ascertain the benefit of intelligent sampling, we pretrain on a much smaller subset of the SQuAD, NewsQA, and NaturalQuestions dataset, which we obtain via intelligent sampling. We select questions which satisfy one of the following criteria, (i) the answer length of the question is \u2265 50, (ii) the question includes 'how' or 'why' question word or (iii) the answer type of the question is 'DESC'. Overall, the size of the selected sample is only 20% of the original dataset, yet achieves a higher EM score than the combined dataset as seen in Table 2 . Yet, surprisingly, the performance is lower than each of the individual dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 547, |
|
"end": 554, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Informed Data Selection", |
|
"sec_num": "5.4" |
|
}, |
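
{

"text": "A small sketch of the selection criteria listed above; the helper names, the dummy classifier, and measuring answer length in characters are assumptions for illustration.\n\ndef keep_example(example, predict_answer_type):\n    # example: a SQuAD-style dict with 'question' and 'answers'.\n    # predict_answer_type: the trained question classifier (e.g. a BERT model\n    # fine-tuned on the Li and Roth (2002) data), returning a coarse type like 'DESC'.\n    question_words = example['question'].lower().split()\n    answers = example['answers']['text']\n    answer = answers[0] if answers else ''\n    return (\n        len(answer) >= 50\n        or 'how' in question_words\n        or 'why' in question_words\n        or predict_answer_type(example['question']) == 'DESC'\n    )\n\n# Toy usage with a dummy classifier standing in for the trained BERT model.\ndummy_classifier = lambda question: 'DESC'\ntoy_dataset = [{'question': 'Why is my claim still pending?', 'answers': {'text': ['Claims are reviewed within 30 days.']}}]\nprint([keep_example(ex, dummy_classifier) for ex in toy_dataset])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informed Data Selection",

"sec_num": "5.4"

},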
|
{ |
|
"text": "Our submission to the DialDoc subtask 1 performs continual pretraining of a transformer-based encoder on out-of-domain QA datasets. Experiments with different QA datasets suggest that conversational QA datasets like CoQA and QuAC are highly beneficial as their setup is substantially similar to Doc2Dial, the downstream dataset of interest. Our final submission ensembles two AlBERT-XL models independently pretrained on CoQA and QuAC and achieves an F1-Score of 70.9% and EM-Score of 53.5% on the competition test-set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this work, we tackle the task of question answering (QA) for English language text. While we believe that the proposed methods can be effective in other languages, we leave this exploration for future work. We also acknowledge that QA systems suffer from bias (Li et al., 2020) , which often lead to unintended real-world consequences. For the purpose of the shared task, we focused solely on the modeling techniques, but a study of model bias in our systems is necessary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 280, |
|
"text": "(Li et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impact Statement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://huggingface.co/datasets/quac", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://huggingface.co/datasets/coqa 3 https://huggingface.co/metrics/squad 4 https://github.com/doc2dial/ sharedtask-dialdoc2021/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "QuAC: Question answering in context", |
|
"authors": [ |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wentau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2174--2184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1241" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen- tau Yih, Yejin Choi, Percy Liang, and Luke Zettle- moyer. 2018. QuAC: Question answering in con- text. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2174-2184, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Searchqa: A new qa dataset augmented with context from a search engine", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Dunn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Levent", |
|
"middle": [], |
|
"last": "Sagun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Higgins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"U" |
|
], |
|
"last": "G\u00fcney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volkan", |
|
"middle": [], |
|
"last": "Cirik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Dunn, Levent Sagun, Mike Higgins, V. U. G\u00fcney, Volkan Cirik, and Kyunghyun Cho. 2017. Searchqa: A new qa dataset augmented with context from a search engine. ArXiv, abs/1704.05179.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "FEQA: A question answering evaluation framework for faithfulness assessment in abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Esin", |
|
"middle": [], |
|
"last": "Durmus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5055--5070", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.454" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Esin Durmus, He He, and Mona Diab. 2020. FEQA: A question answering evaluation framework for faith- fulness assessment in abstractive summarization. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5055- 5070, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "2020. doc2dial: A goal-oriented document-grounded dialogue dataset", |
|
"authors": [ |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chulaka", |
|
"middle": [], |
|
"last": "Gunasekara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sachindra", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Lastras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8118--8128", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.652" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Song Feng, Hui Wan, Chulaka Gunasekara, Siva Patel, Sachindra Joshi, and Luis Lastras. 2020. doc2dial: A goal-oriented document-grounded dia- logue dataset. In Proceedings of the 2020 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 8118-8128, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "MRQA 2019 shared task: Evaluating generalization in reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Fisch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Talmor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minjoon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd Workshop on Machine Reading for Question Answering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--13", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5801" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Fisch, Alon Talmor, Robin Jia, Minjoon Seo, Eu- nsol Choi, and Danqi Chen. 2019. MRQA 2019 shared task: Evaluating generalization in reading comprehension. In Proceedings of the 2nd Work- shop on Machine Reading for Question Answering, pages 1-13, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Don't stop pretraining: Adapt language models to domains and tasks", |
|
"authors": [ |
|
{ |
|
"first": "Ana", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Marasovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8342--8360", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.740" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1601--1611", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1147" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. 2017. TriviaQA: A large scale dis- tantly supervised challenge dataset for reading com- prehension. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1601-1611, Van- couver, Canada. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Natural questions: a benchmark for question answering research. Transactions of the Association of Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennimaria", |
|
"middle": [], |
|
"last": "Palomaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivia", |
|
"middle": [], |
|
"last": "Redfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Alberti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danielle", |
|
"middle": [], |
|
"last": "Epstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Kelcey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Red- field, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Matthew Kelcey, Jacob Devlin, Kenton Lee, Kristina N. Toutanova, Llion Jones, Ming-Wei Chang, Andrew Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natu- ral questions: a benchmark for question answering research. Transactions of the Association of Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Albert: A lite bert for self-supervised learning of language representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learning of language representations. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "UNQOVERing stereotyping biases via underspecified questions", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Khashabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3475--3489", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.311" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Li, Daniel Khashabi, Tushar Khot, Ashish Sab- harwal, and Vivek Srikumar. 2020. UNQOVERing stereotyping biases via underspecified questions. In Findings of the Association for Computational Lin- guistics: EMNLP 2020, pages 3475-3489, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Learning question classifiers", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "COLING 2002: The 19th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Li and Dan Roth. 2002. Learning question clas- sifiers. In COLING 2002: The 19th International Conference on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "SQuAD: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2383--2392", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1264" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 2383-2392, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "CoQA: A conversational question answering challenge", |
|
"authors": [ |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "249--266", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00266" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siva Reddy, Danqi Chen, and Christopher D. Manning. 2019. CoQA: A conversational question answering challenge. Transactions of the Association for Com- putational Linguistics, 7:249-266.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "NewsQA: A machine comprehension dataset", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Trischler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingdi", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Bachman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaheer", |
|
"middle": [], |
|
"last": "Suleman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2nd Workshop on Representation Learning for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "191--200", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-2623" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Trischler, Tong Wang, Xingdi Yuan, Justin Har- ris, Alessandro Sordoni, Philip Bachman, and Ka- heer Suleman. 2017. NewsQA: A machine compre- hension dataset. In Proceedings of the 2nd Work- shop on Representation Learning for NLP, pages 191-200, Vancouver, Canada. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Asking and answering questions to evaluate the factual consistency of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5008--5020", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.450" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Kyunghyun Cho, and Mike Lewis. 2020. Asking and answering questions to evaluate the fac- tual consistency of summaries. In Proceedings of the 58th Annual Meeting of the Association for Com- putational Linguistics, pages 5008-5020, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "HotpotQA: A dataset for diverse, explainable multi-hop question answering", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saizheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2369--2380", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1259" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William Cohen, Ruslan Salakhutdinov, and Christo- pher D. Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answer- ing. In Proceedings of the 2018 Conference on Em- pirical Methods in Natural Language Processing, pages 2369-2380, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Distribution of Question Words for MRQA.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Distribution of Answer Types for MRQA.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": ".", |
|
"content": "<table><tr><td>QA Dataset</td><td>Domain</td><td># Samples</td></tr><tr><td>SQuAD</td><td>Wikipedia</td><td>86k</td></tr><tr><td>NewsQA</td><td>News</td><td>74k</td></tr><tr><td>NaturalQuestions</td><td>Wikipedia</td><td>104k</td></tr><tr><td>HotpotQA</td><td>Wikipedia</td><td>73k</td></tr><tr><td>SearchQA</td><td>Jeopardy</td><td>117k</td></tr><tr><td>TriviaQA</td><td>Trivia</td><td>62k</td></tr><tr><td>MRQA-19 (Train)</td><td>Mixed</td><td>516k</td></tr><tr><td>QuAC</td><td>Wikipedia</td><td>70k</td></tr><tr><td>CoQA</td><td>Kids' Stories, Literature,</td><td>70k</td></tr><tr><td/><td>Exams, News, Wikipedia</td><td/></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Statistics (domain, # samples) for different QA datasets used for continual pre-training.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>: Performance (EM (%), F1 (%)) of</td></tr><tr><td>bert-base-uncased on DialDoc validation set</td></tr><tr><td>when further pretrained on different QA datasets.</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>: Performance (EM (%), F1 (%)) of large</td></tr><tr><td>transformer-based QA models on DialDoc validation</td></tr><tr><td>and testdev set when further pretrained on different QA</td></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Statistics of Average Question Length, Average Answer Length, Average Context Length, and Average Relative Position of the Answer in the Context for Doc2Dial and different MRQA subsets.", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |