|
{ |
|
"paper_id": "Q19-1014", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:08:58.866728Z" |
|
}, |
|
"title": "DREAM: A Challenge Data Set and Models for Dialogue-Based Reading Comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Cornell University", |
|
"location": { |
|
"settlement": "Ithaca", |
|
"region": "NY", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dian", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AI Lab", |
|
"location": { |
|
"settlement": "Bellevue", |
|
"region": "Tencent, WA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jianshu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AI Lab", |
|
"location": { |
|
"settlement": "Bellevue", |
|
"region": "Tencent, WA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AI Lab", |
|
"location": { |
|
"settlement": "Bellevue", |
|
"region": "Tencent, WA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Cornell University", |
|
"location": { |
|
"settlement": "Ithaca", |
|
"region": "NY", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present DREAM, the first dialogue-based multiple-choice reading comprehension data set. Collected from English as a Foreign Language examinations designed by human experts to evaluate the comprehension level of Chinese learners of English, our data set contains 10,197 multiple-choice questions for 6,444 dialogues. In contrast to existing reading comprehension data sets, DREAM is the first to focus on in-depth multi-turn multi-party dialogue understanding. DREAM is likely to present significant challenges for existing reading comprehension systems: 84% of answers are non-extractive, 85% of questions require reasoning beyond a single sentence, and 34% of questions also involve commonsense knowledge. We apply several popular neural reading comprehension models that primarily exploit surface information within the text and find them to, at best, just barely outperform a rulebased approach. We next investigate the effects of incorporating dialogue structure and different kinds of general world knowledge into both rule-based and (neural and non-neural) machine learning-based reading comprehension models. Experimental results on the DREAM data set show the effectiveness of dialogue structure and general world knowledge. DREAM is available at https://dataset. org/dream/.", |
|
"pdf_parse": { |
|
"paper_id": "Q19-1014", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present DREAM, the first dialogue-based multiple-choice reading comprehension data set. Collected from English as a Foreign Language examinations designed by human experts to evaluate the comprehension level of Chinese learners of English, our data set contains 10,197 multiple-choice questions for 6,444 dialogues. In contrast to existing reading comprehension data sets, DREAM is the first to focus on in-depth multi-turn multi-party dialogue understanding. DREAM is likely to present significant challenges for existing reading comprehension systems: 84% of answers are non-extractive, 85% of questions require reasoning beyond a single sentence, and 34% of questions also involve commonsense knowledge. We apply several popular neural reading comprehension models that primarily exploit surface information within the text and find them to, at best, just barely outperform a rulebased approach. We next investigate the effects of incorporating dialogue structure and different kinds of general world knowledge into both rule-based and (neural and non-neural) machine learning-based reading comprehension models. Experimental results on the DREAM data set show the effectiveness of dialogue structure and general world knowledge. DREAM is available at https://dataset. org/dream/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Recently a significant amount of research has focused on the construction of large-scale multiple-choice (Lai et al., 2017; Khashabi et al., 2018; Ostermann et al., 2018) and extractive (Hermann et al., 2015; Hill et al., 2016; Rajpurkar et al., 2016; Trischler et al., 2017) reading comprehension data sets (Section 2). Source documents in these data sets have generally been drawn from formal written texts such as news, fiction, and Wikipedia articles, which are commonly considered wellwritten, accurate, and neutral.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 123, |
|
"text": "(Lai et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 146, |
|
"text": "Khashabi et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 170, |
|
"text": "Ostermann et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 208, |
|
"text": "(Hermann et al., 2015;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 227, |
|
"text": "Hill et al., 2016;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 251, |
|
"text": "Rajpurkar et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 275, |
|
"text": "Trischler et al., 2017)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With the goal of advancing research in machine reading comprehension and facilitating dialogue understanding, we construct and present DREAM -the first multiple-choice Dialoguebased REAding comprehension exaMination data set. We collect 10,197 questions for 6,444 multiturn multi-party dialogues from English language exams, which are carefully designed by educational experts (e.g., English teachers) to assess the comprehension level of Chinese learners of English. Each question is associated with three answer options, exactly one of which is correct. (See Table 1 for an example.) DREAM covers a variety of topics and scenarios in daily life such as conversations on the street, on the phone, in a classroom or library, at the airport or the office or a shop (Section 3).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 561, |
|
"end": 568, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Based on our analysis of DREAM, we argue that dialogue-based reading comprehension is at least as difficult as existing non-conversational counterparts. In particular, answering 34% of DREAM questions requires unspoken commonsense knowledge, for example, unspoken scene information. This might be due to the nature of dialogues: For efficient oral communication, people rarely state obvious explicit world knowledge (Forbes and Choi, 2017) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 416, |
|
"end": 439, |
|
"text": "(Forbes and Choi, 2017)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Dialogue 1 (D1) W: Tom, look at your shoes. How dirty they are! You must clean them. M: Oh, mum, I just cleaned them yesterday. W: They are dirty now. You must clean them again. M: I do not want to clean them today. Even if I clean them today, they will get dirty again tomorrow. W: All right, then. M: Mum, give me something to eat, please. W: You had your breakfast in the morning, Tom, and you had lunch at school. M: I am hungry again. W: Oh, hungry? But if I give you something to eat today, you will be hungry again tomorrow.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "such as ''Christmas Day is celebrated on December 25th.''Understanding", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Q1 Why did the woman say that she wouldn't give him anything to eat? A. Because his mother wants to correct his bad habit. B. Because he had lunch at school. C. Because his mother wants to leave him hungry. the social implications of an utterance as well as inferring a speaker's intentions is also regularly required for answering dialogue-based questions. The dialogue content in Table 1 , for example, is itself insufficient for readers to recognize the intention of the female speaker (W) in the first question (Q1). However, world knowledge is rarely considered in state-of-the-art reading comprehension models (Tay et al., 2018; Wang et al., 2018b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 616, |
|
"end": 634, |
|
"text": "(Tay et al., 2018;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 635, |
|
"end": 654, |
|
"text": "Wang et al., 2018b)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 382, |
|
"end": 389, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "such as ''Christmas Day is celebrated on December 25th.''Understanding", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Moreover, dialogue-based questions can cover information imparted across multiple turns involving multiple speakers. In DREAM, approximately 85% of questions can only be answered by considering the information from multiple sentences. For example, to answer Q1 in Table 3 later in the paper regarding the date of birth of the male speaker (M), the supporting sentences (in bold) include ''You know, tomorrow is Christmas Day'' from the female speaker and ''. . . I am more than excited about my birthday, which will come in two days'' from the male speaker. Compared with ''multiple-sentence questions'' in traditional reading comprehension data sets, DREAM further requires an understanding of the turn-based structure of dialogue-for example, for aligning utterances with their corresponding speakers.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 271, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "such as ''Christmas Day is celebrated on December 25th.''Understanding", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As only 16% of correct answer options are text spans from the source documents, we primarily explore rule-based methods and state-of-theart neural models designed for multiple-choice reading comprehension (Section 4). We find first that neural models designed for non-dialoguebased reading comprehension Dhingra et al., 2017; Wang et al., 2018b) do not fare well: The highest achieved accuracy is 45.5%, only slightly better than the accuracy (44.6%) of a simple lexical baseline (Richardson et al., 2013) . For the most part, these models fundamentally exploit only surface-level information from the source documents. Considering the abovementioned challenges, however, we hypothesize that incorporating general world knowledge and aspects of the dialogue structure would allow a better understanding of the dialogues. As a result, we modify our baseline systems to include (1) general world knowledge in the form of such as ConceptNet relations (Speer et al., 2017 ) and a pre-trained language model (Radford et al., 2018) , and (2) speaker information for each utterance. Experiments show the effectiveness of these factors on the lexical baselines as well as neural and non-neural machine learning approaches: We acquire up to 11.9% absolute gain in accuracy compared with the highest performance achieved by the state-of-the-art reading comprehension model (Wang et al., 2018b) , which mainly relies on explicit surface-level information in the text (Section 5).", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 325, |
|
"text": "Dhingra et al., 2017;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 345, |
|
"text": "Wang et al., 2018b)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 505, |
|
"text": "(Richardson et al., 2013)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 948, |
|
"end": 967, |
|
"text": "(Speer et al., 2017", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1025, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1363, |
|
"end": 1383, |
|
"text": "(Wang et al., 2018b)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "such as ''Christmas Day is celebrated on December 25th.''Understanding", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, we see a significant gap between the best automated approach (59.5%) and human ceiling performance (98.6%) on the DREAM data set. This provides yet additional evidence that dialogue-based reading comprehension is a very challenging task. We hope that it also inspires the research community to develop methods for the dialogue-based reading comprehension task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "such as ''Christmas Day is celebrated on December 25th.''Understanding", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We divide reading comprehension data sets into three categories based on the types of answers: extractive, abstractive, and multiple choice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In recent years, we have seen increased interest in large-scale cloze/span-based reading comprehension", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extractive and Abstractive Data Sets", |
|
"sec_num": "2.1" |
|
}, |
|
|
{ |
|
"text": "Answer type extractive abstractive abstractive multiple-choice multiple-choice Source document type written text written text written text written text dialogue # of source documents 536 1,572 8,399 27,933 6,444 Average answer Table 2 : Distribution of answer (or correct answer option) types in three kinds of reading comprehension data sets. Statistics of other data sets come from Reddy et al. (2018) , Ko\u010disk\u1ef3 et al. (2018) , and Lai et al. (2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 419, |
|
"text": "Reddy et al. (2018)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 443, |
|
"text": "Ko\u010disk\u1ef3 et al. (2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 467, |
|
"text": "Lai et al. (2017)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 242, |
|
"text": "type extractive abstractive abstractive multiple-choice multiple-choice Source document type written text written text written text written text dialogue # of source documents 536 1,572 8,399 27,933 6,444 Average answer", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 250, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "NarrativeQA", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "data set construction (Hermann et al., 2015; Hill et al., 2016; Onishi et al., 2016; Rajpurkar et al., 2016; Bajgar et al., 2016; Nguyen et al., 2016; Trischler et al., 2017; Joshi et al., 2017; . We regard them as extractive since candidate answers are usually short spans from source documents. State-of-the-art neural models with attention mechanisms already achieve very high performance based on local lexical information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 44, |
|
"text": "(Hermann et al., 2015;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 45, |
|
"end": 63, |
|
"text": "Hill et al., 2016;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 64, |
|
"end": 84, |
|
"text": "Onishi et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 85, |
|
"end": 108, |
|
"text": "Rajpurkar et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 129, |
|
"text": "Bajgar et al., 2016;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 150, |
|
"text": "Nguyen et al., 2016;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 151, |
|
"end": 174, |
|
"text": "Trischler et al., 2017;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 194, |
|
"text": "Joshi et al., 2017;", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NarrativeQA", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Recently researchers work on the construction of spoken span-based data sets Li et al., 2018) by applying text-to-speech technologies or recruiting human speakers based on formal written document-based data sets such as SQuAD (Rajpurkar et al., 2016) . Some spanbased conversation data sets are constructed from a relatively small size of dialogues from television shows (Chen and Choi, 2016; Ma et al., 2018) . Considering the limitations in extractive data sets, answers in abstractive data sets such as MS MARCO (Nguyen et al., 2016) , SearchQA (Dunn et al., 2017) , and NarrativeQA (Ko\u010disk\u1ef3 et al., 2018) are human-crowdsourced based on source documents or summaries. Concurrently, there is a growing interest in conversational reading comprehension such as CoQA (Reddy et al., 2018) . Because annotators tend to copy spans as answers (Reddy et al., 2018) , the majority of answers are still extractive in these data sets (Table 2) . Compared to the data sets mentioned above, most of the correct answer options (83.7%) in DREAM are free-form text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 93, |
|
"text": "Li et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 250, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 392, |
|
"text": "(Chen and Choi, 2016;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 409, |
|
"text": "Ma et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 536, |
|
"text": "(Nguyen et al., 2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 567, |
|
"text": "(Dunn et al., 2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 608, |
|
"text": "(Ko\u010disk\u1ef3 et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 787, |
|
"text": "(Reddy et al., 2018)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 839, |
|
"end": 859, |
|
"text": "(Reddy et al., 2018)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 926, |
|
"end": 935, |
|
"text": "(Table 2)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "NarrativeQA", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We primarily discuss the multiple-choice data sets, in which answer options are not restricted to extractive text spans in the given document. Instead, most of the correct answer options are abstractive (Table 2) . Multiple-choice data sets involve extensive human involvement for problem generation during crowdsourcing (i.e., questions, correct answer option, and distractors). Besides surface matching, a significant portion of questions require multiple-sentence reasoning and external knowledge (Richardson et al., 2013; Mostafazadeh et al., 2016; Khashabi et al., 2018; Ostermann et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 500, |
|
"end": 525, |
|
"text": "(Richardson et al., 2013;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 552, |
|
"text": "Mostafazadeh et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 553, |
|
"end": 575, |
|
"text": "Khashabi et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 576, |
|
"end": 599, |
|
"text": "Ostermann et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 212, |
|
"text": "(Table 2)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multiple-Choice Data Sets", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Besides crowdsourcing, some data sets are collected from examinations designed by educational experts (Penas et al., 2014; Shibuki et al., 2014; Tseng et al., 2016; Clark et al., 2016; Lai et al., 2017; Mihaylov et al., 2018) , which aim to test human examinees. There are various types of complicated questions such as math word problems, summarization, logical reasoning, and sentiment analysis. Because we can adopt more objective evaluation criteria such as accuracy, these questions are usually easy to grade. Besides, questions from examinations are generally clean and high-quality. Therefore, human performance ceiling on this kind of data set is much higher (e.g., 94.5% on RACE [Lai et al., 2017] and 98.6% on DREAM in accuracy) than that of data sets built by crowdsourcing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 122, |
|
"text": "(Penas et al., 2014;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 144, |
|
"text": "Shibuki et al., 2014;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 164, |
|
"text": "Tseng et al., 2016;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 184, |
|
"text": "Clark et al., 2016;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 185, |
|
"end": 202, |
|
"text": "Lai et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 225, |
|
"text": "Mihaylov et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 688, |
|
"end": 706, |
|
"text": "[Lai et al., 2017]", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multiple-Choice Data Sets", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In comparison, we present the first multiplechoice dialogue-based data set from examinations that contains a large percentage of questions that require multiple sentence inference. To the best of our knowledge, DREAM also contains the largest number of questions involving commonsense reasoning compared with other examination data sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multiple-Choice Data Sets", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this section, we describe how we construct DREAM (Section 3.1) and provide a detailed analysis of this data set (Section 3.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We collect dialogue-based comprehension problems from a variety of English language exams (including practice exams) such as National College Entrance Examination, College English Test, and Public English Test, 1 which are designed by human experts to assess either the listening or reading comprehension level of Chinese English 1 We list all the Web sites used for data collection in the released data set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 331, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collection Methodology", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Value # of answer options per question 3 # of turns 30,183 Avg./Max. # of questions per dialogue 1.6 / 10 Avg./Max. # of speakers per dialogue 2.0 / 7 Avg./Max. # of turns per dialogue 4.7 / 48 Avg./Max. option length (in tokens) 5.3 / 21 Avg./Max. question length (in tokens) 8.6 / 24 Avg./Max. dialogue length (in tokens) 85.9 / 1,290 vocabulary size 13,037 learners in high schools and colleges (for individuals aged 12-22 years). All the problems in DREAM are freely accessible online for public usage. Each problem consists of a dialogue and a series of multiple-choice questions. To ensure every question is associated with exactly three answer options, we drop wrong answer option(s) randomly for questions with more than three options. We remove duplicate problems and randomly split the data at the problem level, with 60% train, 20% development, and 20% test.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metric", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We summarize the statistics of DREAM in Table 4 and data split in Table 5 . Compared with existing data sets built from formal written texts, the vocabulary size is relatively small since spoken English by its nature makes greater use of highfrequency words and needs a smaller vocabulary for efficient real-time communication (Nation, 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 327, |
|
"end": 341, |
|
"text": "(Nation, 2006)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 47, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 66, |
|
"end": 73, |
|
"text": "Table 5", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We categorize questions into two main categories according to the types of knowledge required to answer them: matching and reasoning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Matching A question is entailed or paraphrased by exactly one sentence in a dialogue. The answer can be extracted from the same sentence. For example, we can easily verify the correctness of the question-answer pair (''What kind of room does the man want to rent?'', ''A two-bedroom apartment.'')", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "based on the sentence ''M: I'm interested in renting a two-bedroom apartment.'' This category is further divided into two categories, word matching and paraphrasing, in previous work Trischler et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 206, |
|
"text": "Trischler et al., 2017)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Reasoning Questions that cannot be answered by the surface meaning of a single sentence belong to this category. We further define four subcategories as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "-Summary Answering this kind of questions requires the whole picture of a dialogue, such as the topic of a dialogue and the relation between speakers (e.g., D2-Q3 in Table 3 ). Under this category, questions such as ''What are the two speakers talking about?'' and ''What are the speakers probably doing?'' are frequently asked. -Logic We require logical reasoning to answer questions in this category. We usually need to identify logically implied relations among multiple sentences in a dialogue. To reduce the ambiguity during the annotation, we regard a question that can only be solved by considering the content of multiple sentences and does not belong to the summary subcategory that involves all the sentences in a dialogue as a logic question. Following this definition, both D2-Q1 and D2-Q2 in Table 3 belong to this category. -Arithmetic Inferring the answer requires arithmetic knowledge (e.g., D2-Q1 in Table 3 requires 25 \u2212 1 + 2 = 26).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 173, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 805, |
|
"end": 812, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 917, |
|
"end": 924, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "-Commonsense To answer questions under this subcategory, besides the textual information in the dialogue, we also require external commonsense knowledge that cannot be obtained from the dialogue. For instance, all questions in Table 3 fall under this category. D2-Q1 and D2-Q2 in Table 3 belong to both logic and commonsense since they require multiple sentences as well as commonsense knowledge for question answering. There exist multiple types of commonsense knowledge in DREAM such as the well-known properties of a highly recognizable entity (e.g., D2-Q1 in Table 3 ), the prominent relationshipbetween two speakers (e.g., D2-Q3 in Table 6 : Distribution (%) of question types.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 227, |
|
"end": 234, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 287, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 570, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 637, |
|
"end": 644, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Table 3), the knowledge of or shared by a particular culture (e.g., when a speaker says ''Cola? I think it tastes like medicine.'', she/he probably means ''I don't like cola.''), and the cause-effect relation between events (e.g., D1-Q1 in Table 1 ). We refer readers to LoBue and Yates (2011) for detailed definitions. Table 6 shows the question type distribution labeled by two human annotators on 25% questions randomly sampled from the development and test sets. Besides the previously defined question categories, we also report the percentage of questions that require reasoning over multiple sentences (i.e., summary or logic questions) and the percentage of questions that require the surfacelevel understanding or commonsense/math knowledge based on the content of a single sentence. As a question can belong to multiple reasoning subcategories, the summation of the percentage of reasoning subcategories is not equal to the percentage of reasoning. The Cohen's kappa coefficient is 0.67 on the development set and 0.68 on the test set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 247, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 327, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Dialogues in DREAM are generally clean and mostly error-free because they are carefully designed by educational experts. However, it is not guaranteed that each dialogue is written or proofread by a native speaker. Besides, dialogues tend to be more proper and less informal for exam purposes. To have a rough estimation of the quality of dialogues in DREAM and the differences between these dialogues and more casual ones in movies or television shows, we run a proofreading tool-Grammarly 2 -on all the dialogues from the annotated 25% instances of the development set and the same size (20.7k tokens) of dialogues from Friends, a famous American television show whose transcripts are commonly used for dialogue understanding (Chen and Choi, 2016; Ma et al., 2018) . As shown in Table 7 , there exist fewer spelling mistakes and the overall score is slightly higher than that of the dialogues in Friends.", |
|
"cite_spans": [ |
|
{ |
|
"start": 728, |
|
"end": 749, |
|
"text": "(Chen and Choi, 2016;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 766, |
|
"text": "Ma et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 781, |
|
"end": 788, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Based on the evaluated instances, articles and verb forms are the two most frequent grammar error categories (10 and 8, respectively, out of 23) in DREAM. Besides, the language tends to be less precise in DREAM, indicated by the number of vocabulary suggestions. For example, experts tend to use expressions such as ''really hot, '' ''really beautiful,'' ''very bad,'' and ''very important'' rather than more appropriate yet more advanced adjectives that might hinder reading comprehension of language learners with smaller vocabularies. According to the explanations provided by the tool, the readability scores for both data sets fall into the same category ''Your text is very simple and easy to read, likely to be understood by an average 5th-grader (age 10).''", |
|
"cite_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 379, |
|
"text": "'' ''really beautiful,'' ''very bad,'' and ''very", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We formally introduce the dialogue-based reading comprehension task and notations in Section 4.1. To investigate the effects of different kinds of general world knowledge and dialogue structure, we incorporate them into rule-based approaches (Section 4.2) as well as non-neural (Section 4.3) and neural (Section 4.4) machine learning approaches. We describe in detail preprocessing and training in Section 4.5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We start with a formal definition of the dialoguebased multiple-choice reading comprehension task. An n-turn dialogue D is defined as D = {s 1 : t 1 , s 2 : t 2 , . . . , s n : t n }, where s i represents the speaker ID (e.g., ''M'' and ''W''), and t i represents the text of the i th turn. Let Q denote the text of question, and O 1..3 denote the text of three answer options. The task is to choose the correct one from answer options O 1..3 associated with question Q given dialogue D. In this paper, we regard this task as a three-class classification problem, each class corresponding to an answer option. For convenience, we define the following notations, which will be referred in the rest of this paper. Let D s denote the turns spoken by speaker", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "s in D. Formally, D s = {s i 1 : t i 1 , s i 2 : t i 2 , . . . , s i m : t i m } where {i 1 , i 2 , . . . , i m } = {i | s i = s} and i 1 < i 2 < . . . < i m .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In particular, s = * denotes all the speakers. W D s and W O i denote the ordered set of the running words (excluding punctuation marks) in D s and O i , respectively. Questions designed for dialoguebased reading comprehension often focus on a particular speaker. If there is exactly one speaker mentioned in a question, we use s Q to denote this target speaker. Otherwise, s Q = * . For example, given the dialogue in Table 3 , s Q =''M'' for Question 1 and 2, and s Q = * for Question 3.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 419, |
|
"end": 426, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Problem Formulation and Notations", |
|
"sec_num": "4.1" |
|
}, |
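{

"text": "A minimal Python sketch of the $D_s$ notation above (not from the released DREAM code; the function name is illustrative), assuming a dialogue is given as a list of (speaker, text) pairs:\n\ndef turns_of_speaker(dialogue, s):\n    # D_s: the turns spoken by speaker s, kept in their original order\n    return [(spk, txt) for spk, txt in dialogue if spk == s]\n\ndialogue = [(\"W\", \"Tom, look at your shoes.\"), (\"M\", \"Oh, mum, I just cleaned them yesterday.\")]\nprint(turns_of_speaker(dialogue, \"M\"))  # -> [('M', 'Oh, mum, I just cleaned them yesterday.')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Formulation and Notations",

"sec_num": "4.1"

},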
|
{ |
|
"text": "We first attempt to incorporate dialogue structure information into sliding window (SW), a rulebased approach developed by Richardson et al. (2013) . This approach matches a bag of words constructed from a question Q and one of its answer option O i with a given document, and calculates the TF-IDF style matching score for each answer option.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 147, |
|
"text": "Richardson et al. (2013)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "LetD s ,Q, and\u00d4 i be the unordered set of distinct words (excluding punctuation marks) in D s , Q, and O i , respectively. Instead of only regarding dialogue D as a non-conversational text snippet, we also pay special attention to the context that is relevant to the target speaker mentioned in the question. Therefore, given a target speaker s Q , we propose to compute a speaker-focused sliding window score for each answer option O i , by matching a bag of words constructed from Q and O i with D s Q (i.e., turns spoken by s Q ). Given speaker s, we formally define the sliding window score sw of O i as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "sw s i = max j k=1...|T i | \u23a7 \u23a8 \u23a9 ic s (W D s j+k ) if W D s j+k \u2208 T i 0 otherwise", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where ic s (w) = log 1 +", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "1 i 1 1(W D s i =w) , T i = O i \u222aQ, and W D s i denotes the i-th word in W D s .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
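{

"text": "A minimal sketch of the sliding window score in Equation (1), assuming pre-tokenized, lowercased word lists; names are illustrative and not from the released code:\n\nimport math\nfrom collections import Counter\n\ndef sliding_window_score(doc_words, question_words, option_words):\n    # ic^s(w) = log(1 + 1 / count of w in W^{D_s}); rarer words weigh more\n    counts = Counter(doc_words)\n    target = set(option_words) | set(question_words)  # T_i = O_i-hat U Q-hat\n    size = len(target)\n    best = 0.0\n    for j in range(len(doc_words)):\n        window = doc_words[j:j + size]\n        score = sum(math.log(1.0 + 1.0 / counts[w]) for w in window if w in target)\n        best = max(best, score)\n    return best\n\nPassing the full dialogue's words yields $sw^{*}_{i}$; passing only the target speaker's words yields $sw^{s_Q}_{i}$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rule-Based Approaches",

"sec_num": "4.2"

},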
|
{ |
|
"text": "Based on these definitions, we can regard sw * i as the general score defined in the original sliding window approach, and sw s Q i represents the speakerfocused sliding window score considering the target speaker s Q .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Because the sliding window score ignores longrange dependencies, Richardson et al. (2013) introduce a distance-based variation (DSW), in which a word-distance based score is subtracted from the sliding window score to arrive at the final score. Similarly, we calculate the speaker-focused distance-based score given a (Q, O i ) pair and s Q , by counting the distance between the occurrence of a word in Q and a word in O i in D s Q . More formally, given speaker s and a set of stop words 3 U , the distance-based score d of O i is defined as", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 89, |
|
"text": "Richardson et al. (2013)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d s i = \u23a7 \u23a8 \u23a9 1 if |I s Q | = 0 or |I s O i | = 0 \u03b4 s i |W D s |\u22121 otherwise", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "I s Q = (Q \u2229D s ) \u2212 U , I s O i = (\u00d4 i \u2229D s ) \u2212 Q \u2212 U ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-Based Approaches", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "i is the minimum number of words between an occurrence of a question word and an answer option word in W D s , plus one. The formal definition of \u03b4 s i is as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b4 s i = min W D s j \u2208I s Q ,W D s k \u2208I s O i |j \u2212 k| + 1", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
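{

"text": "A minimal sketch of the distance-based score in Equations (2) and (3), under the same tokenization assumptions as above (illustrative, not the released implementation):\n\ndef distance_score(doc_words, question_words, option_words, stopwords=frozenset()):\n    # I^s_Q and I^s_{O_i}: positions in W^{D_s} of question / option words\n    q_set = (set(question_words) & set(doc_words)) - stopwords\n    o_set = (set(option_words) & set(doc_words)) - set(question_words) - stopwords\n    q_pos = [j for j, w in enumerate(doc_words) if w in q_set]\n    o_pos = [k for k, w in enumerate(doc_words) if w in o_set]\n    if not q_pos or not o_pos:\n        return 1.0  # d^s_i = 1 when either index set is empty (Equation 2)\n    delta = min(abs(j - k) + 1 for j in q_pos for k in o_pos)  # Equation (3)\n    return delta / (len(doc_words) - 1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rule-Based Approaches",

"sec_num": "4.2"

},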
|
{ |
|
"text": "Based on these definitions, we can regard d * i as the distance-based score defined in the original sliding window approach, and d", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "s Q", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "i represents the speaker-focused distance-based score considering speaker s Q . In addition, the final distance-based sliding window score of O i (Richardson et al., 2013) can be formulated as", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 171, |
|
"text": "(Richardson et al., 2013)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "sw * i \u2212 d * i", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Expression (4) only focuses on the general (or speaker-independent) information (i.e., sw * i and d * i ); we can capture general and speaker-focused information (i.e., sw", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s Q i , and d s Q i ) simultaneously by averaging them: sw s Q i + sw * i 2 \u2212 d s Q i + d * i 2", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since a large percentage of questions cannot be solved by word-level matching, we also attempt to incorporate general world knowledge into our rule-based method. We calculate cs s i , the 3 We use the list of stop words from NLTK (Bird and Loper, 2004) . maximum cosine similarity between O i and consecutive words of the same length in W D s , as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 189, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 252, |
|
"text": "(Bird and Loper, 2004)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "cs s i = max j cos W O i , W D s j...j+|W O i |\u22121 (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where x is obtained by averaging the embeddings of the constituent words in x. Here we use Concept-Net embeddings (Speer et al., 2017) because they leverage the knowledge graph that focuses on general world knowledge. Following Expression (5), we capture both general and speaker-focused semantic information within a dialogue as follows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 134, |
|
"text": "(Speer et al., 2017)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
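{

"text": "A minimal sketch of the window-level cosine similarity in Expression (6), assuming the words of $O_i$ and $W^{D_s}$ have already been mapped to ConceptNet (e.g., Numberbatch) vectors as NumPy arrays; names are illustrative:\n\nimport numpy as np\n\ndef max_window_cosine(option_vecs, doc_vecs):\n    # cs^s_i: cosine between the averaged option embedding and the\n    # best-matching window of the same length in W^{D_s}\n    m = len(option_vecs)\n    o = np.mean(option_vecs, axis=0)\n    best = -1.0\n    for j in range(len(doc_vecs) - m + 1):\n        w = np.mean(doc_vecs[j:j + m], axis=0)\n        cos = float(np.dot(o, w) / (np.linalg.norm(o) * np.linalg.norm(w) + 1e-8))\n        best = max(best, cos)\n    return best",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rule-Based Approaches",

"sec_num": "4.2"

},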
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "cs s Q i + cs * i 2", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To make the final answer option selection, our rule-based method combines Expressions (5) and 7:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "arg max i sw s Q i + sw * i 2 \u2212 d s Q i + d * i 2 + cs s Q i + cs * i 2", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "and \u03b4 s", |
|
"sec_num": null |
|
}, |
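{

"text": "A minimal sketch of the final selection rule in Expression (8), assuming the three scores have been computed per option for both the general (*) and speaker-focused (s_Q) views; names are illustrative:\n\ndef select_option(scores_general, scores_speaker):\n    # Average each score over the two views, then take the argmax option\n    totals = []\n    for g, s in zip(scores_general, scores_speaker):\n        sw = (s[\"sw\"] + g[\"sw\"]) / 2\n        d = (s[\"d\"] + g[\"d\"]) / 2\n        cs = (s[\"cs\"] + g[\"cs\"]) / 2\n        totals.append(sw - d + cs)\n    return max(range(len(totals)), key=totals.__getitem__)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rule-Based Approaches",

"sec_num": "4.2"

},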
|
{ |
|
"text": "To explore what features are effective for dialogue understanding, we first consider a gradient boosting decision tree (GBDT) classifier. Besides the conventional bag-of-words features, we primarily focus on features related to general world knowledge and dialogue structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 Bag of words of each answer option.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 Features inspired by rule-based approaches: We adopt the features introduced in Section 4.2, including speaker-independent scores (i.e., sw * i and d * i ) and speaker-focused scores (i.e., sw We consider matching position because of our observation of the existence of concessions and negotiations in dialogues (Amgoud et al., 2007) . We assume the facts or opinions expressed near the end of a dialogue tend to be more critical for us to answer a question.", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 335, |
|
"text": "(Amgoud et al., 2007)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
|
{ |
|
"text": "\u2022 Pointwise mutual information (PMI): pmi", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s Q max,1..3 , pmi * max,1..3 , pmi s Q min,1..3 , pmi * min,1..3 , pmi s Q avg,1..3 , and pmi * avg,1..3 , where pmi s f,i is defined as pmi s f,i = j log f k C 2 (W O i j ,W D s k ) C 1 (W O i j )C 1 (W D s k ) |W O i |", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "C 1 (w) denotes the word frequency of w in external copora (we use Reddit posts [Tan and Lee, 2015] ), and C 2 (w 1 , w 2 ) represents the co-occurrence frequency of word w 1 and w 2 within a distance < K in external copora. We use PMI to evaluate the relatedness between the content of an answer option and the target-speaker-focused context based on co-occurrences of words in external corpora, inspired by previous studies on narrative event chains (Chambers and Jurafsky, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 99, |
|
"text": "[Tan and Lee, 2015]", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 481, |
|
"text": "(Chambers and Jurafsky, 2008)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
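{

"text": "A minimal sketch of the PMI feature in Equation (9), assuming unigram counts C1 and windowed co-occurrence counts C2 have been precomputed from the external corpus; the 0.5 smoothing for unseen pairs is an illustrative choice, not from the paper:\n\nimport math\n\ndef pmi_feature(option_words, doc_words, C1, C2, f=max):\n    # For each option word, apply f (max, min, or a mean function) over\n    # document words to the PMI ratio, take the log, then average over\n    # option words\n    vals = []\n    for wo in option_words:\n        ratios = [C2.get((wo, wd), 0.5) / (C1.get(wo, 1) * C1.get(wd, 1))\n                  for wd in doc_words]\n        vals.append(math.log(f(ratios)))\n    return sum(vals) / max(len(vals), 1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature-Based Classifier",

"sec_num": "4.3"

},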
|
{ |
|
"text": "\u2022 ConceptNet relations (CR): cr 1..3,1..|R| . R = {r 1 , r 2 , . . .} is the set of ConceptNet relation types (e.g., ''CapableOf'' and ''PartOf'') . cr i,j is the number of relation triples (w 1 , r j , w 2 ) that appear in the ConceptNet (Speer et al., 2017) , where w 1 represents a word in answer option O i , w 2 represents a word in D, and the relation type r j \u2208 R. Similar to the motivation for using PMI, we use CR to capture the association between an answer option and the source dialogue based on raw co-occurrence counts in the commonsense knowledge base.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 146, |
|
"text": "''CapableOf'' and ''PartOf'')", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 259, |
|
"text": "(Speer et al., 2017)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
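{

"text": "A minimal sketch of the CR feature, assuming ConceptNet is available as an in-memory list of (head, relation, tail) triples; illustrative only:\n\ndef conceptnet_relation_counts(option_words, doc_words, triples, relation_types):\n    # cr_{i,j}: number of triples (w1, r_j, w2) with w1 in O_i and w2 in D\n    opt, doc = set(option_words), set(doc_words)\n    counts = {r: 0 for r in relation_types}\n    for w1, r, w2 in triples:\n        if r in counts and w1 in opt and w2 in doc:\n            counts[r] += 1\n    return counts",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature-Based Classifier",

"sec_num": "4.3"

},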
|
{ |
|
"text": "\u2022 i represent the maximum cosine similarity between O i and consecutive words of the same length in D and D s Q , respectively (Expression (6) in Section 4.2). We use ConceptNet embeddings (Speer et al., 2017) because they leverage the general world knowledge graph.", |
|
"cite_spans": [ |
|
{ |
|
"start": 189, |
|
"end": 209, |
|
"text": "(Speer et al., 2017)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature-Based Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Our end-to-end neural model is based on a generative pre-trained language model (LM). We follow the framework of finetuned transformer LM (FTLM) (Radford et al., 2018) and make modifications for dialogue-based reading comprehension.", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 167, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-To-End Neural Network", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The training procedure of FTLM consists of two stages. The first stage is to learn a highcapacity language model on a large-scale unsupervised corpus of tokens U = {u 1 , . . . , u n } by maximizing the following likelihood:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-To-End Neural Network", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "L LM (U) = i log P (u i | u i\u2212k , . . . , u i\u22121 ; \u0398) (10)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-To-End Neural Network", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "where k is the context window size, and the conditional probability P is modeled by a multilayer transformer decoder with parameters \u0398. In the second stage, the model is adapted to a labeled data set C, where each instance consists of a sequence of input tokens x 1 , . . . , x m with a label y, by maximizing:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-To-End Neural Network", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "L(C) = x,y log P (y | x 1 , . . . , x m ) + \u03bbL LM (C) (11)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-To-End Neural Network", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "where P (y | x 1 , . . . , x m ) is obtained by a linear + softmax layer over the final transformer block's activation, and \u03bb is the weight for language model. For multiple-choice reading comprehension, the input tokens x 1 , . . . , x m come from the concatenation of a start token, dialogue, question, a delimiter token, answer option, and an end token; y indicates if the answer option is correct. We refer readers to Radford et al. (2018) for more details.", |
|
"cite_spans": [ |
|
{ |
|
"start": 421, |
|
"end": 442, |
|
"text": "Radford et al. (2018)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-To-End Neural Network", |
|
"sec_num": "4.4" |
|
}, |
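{

"text": "A minimal sketch of how one input sequence is assembled per answer option, assuming token lists; the special-token strings are illustrative placeholders, not the exact tokens of the released FTLM code:\n\ndef build_inputs(dialogue_tokens, question_tokens, option_tokens_list,\n                 start=\"_start_\", delim=\"_delim_\", end=\"_classify_\"):\n    # One sequence per option: [start] dialogue question [delim] option [end];\n    # a linear + softmax head over the final transformer state at [end]\n    # scores each of the three candidate sequences\n    return [[start] + dialogue_tokens + question_tokens + [delim] + opt + [end]\n            for opt in option_tokens_list]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "End-To-End Neural Network",

"sec_num": "4.4"

},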
|
{ |
|
"text": "Because the original FTLM framework already leverages rich linguistic information from a large unlabeled corpus, which can be regarded as a type of tacit general world knowledge, we investigate whether additional dialogue structure can further improve this strong baseline. We propose speaker embedding to better capture dialogue structure. Specifically, in the original framework, given an input context (u \u2212k , . . . , u \u22121 ) of the transformer, the encoding of u \u2212i is we we we(u \u2212i ) + pe pe pe(i), where we we we (\u2022) is the word embedding, and pe pe pe(\u2022) is the position embedding. When adapting \u0398 to DREAM, we change the encoding to we we we(u \u2212i ) + pe pe pe(i)+se se se(u \u2212i , s Q ), where the speaker embedding se se se(u \u2212i , s Q ) is (a) 0 if the token u \u2212i is not in the dialogue (i.e. it is either a start/end/delimiter token or a token in the question/option); (b) e e e target if the token is spoken by s Q ; (c) e e e rest if the token is in the dialogue but not spoken by s Q . e e e target and e e e rest are trainable and initialized randomly. We show the overall framework in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 518, |
|
"end": 521, |
|
"text": "(\u2022)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1097, |
|
"end": 1105, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "End-To-End Neural Network", |
|
"sec_num": "4.4" |
|
}, |
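{

"text": "A minimal sketch of the modified token encoding, assuming $we$ and $pe$ are embedding lookup tables of NumPy vectors; illustrative, not the released implementation:\n\nimport numpy as np\n\ndef encode_token(u, i, we, pe, e_target, e_rest, in_dialogue, spoken_by_target):\n    # we(u) + pe(i) + se(u, s_Q); the speaker embedding is zero outside the\n    # dialogue, e_target for tokens spoken by s_Q, and e_rest otherwise\n    enc = we[u] + pe[i]\n    if in_dialogue:\n        enc = enc + (e_target if spoken_by_target else e_rest)\n    return enc\n\nd = 4\nwe, pe = {\"hello\": np.ones(d)}, {0: np.zeros(d)}\ne_target, e_rest = np.random.randn(d), np.random.randn(d)\nprint(encode_token(\"hello\", 0, we, pe, e_target, e_rest, True, True))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "End-To-End Neural Network",

"sec_num": "4.4"

},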
|
{ |
|
"text": "For all the models, we conduct coreference resolution to determine speaker mentions of s Q based on simple heuristics. Particularly, we map three most common speaker abbreviations (i.e., ''M''; ''W'' and ''F'') that appear in dialogues to their eight most common corresponding mentions (i.e., ''man,'' ''boy,'' ''he,'' and ''his''; ''woman,'' ''girl,'' ''she,'' and ''her'') in questions. We keep speaker abbreviations unchanged, since neither replacing them with their corresponding full forms nor removing them contributes to the performance based on our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 210, |
|
"text": "''M''; ''W'' and ''F'')", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 374, |
|
"text": "''man,'' ''boy,'' ''he,'' and ''his''; ''woman,'' ''girl,'' ''she,'' and ''her'')", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing and Training Details", |
|
"sec_num": "4.5" |
|
}, |
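{

"text": "A minimal sketch of this heuristic speaker resolution (illustrative; the released code may differ):\n\n# Map speaker abbreviations in dialogues to their common mentions in questions\nSPEAKER_MENTIONS = {\n    \"M\": {\"man\", \"boy\", \"he\", \"his\"},\n    \"W\": {\"woman\", \"girl\", \"she\", \"her\"},\n    \"F\": {\"woman\", \"girl\", \"she\", \"her\"},\n}\n\ndef target_speaker(question_words, dialogue_speakers):\n    # s_Q: the unique speaker whose mentions occur in the question; '*' otherwise\n    q = set(question_words)\n    hits = {s for s in dialogue_speakers if SPEAKER_MENTIONS.get(s, set()) & q}\n    return hits.pop() if len(hits) == 1 else \"*\"",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Preprocessing and Training Details",

"sec_num": "4.5"

},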
|
{ |
|
"text": "For the neural model mentioned in Section 4.4, most of our parameter settings follow Radford et al. (2018) . We adopt the same preprocessing procedure and use their publicly released language model, which is pre-trained on the BooksCorpus data set (Zhu et al., 2015) . We set the batch size to 8, language model weight \u03bb to 2, and maximum epochs of training to 10.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 106, |
|
"text": "Radford et al. (2018)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 266, |
|
"text": "(Zhu et al., 2015)", |
|
"ref_id": "BIBREF51" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing and Training Details", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "For other models, we use the following preprocessing steps. We tokenize and lowercase the corpus, convert number words to numeric digits, normalize time expressions to 24-hour numeric form, and address negation by removing interrogative sentences that receive ''no'' as the reply. We use the gradient boosting classifier implemented in the scikit-learn toolkit (Pedregosa et al., 2011) . We set the number of boosting iterations to 600 and keep the rest of hyperparameters unchanged. The distance upper bound K for PMI is set to 10.", |
|
"cite_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 385, |
|
"text": "(Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing and Training Details", |
|
"sec_num": "4.5" |
|
}, |
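{

"text": "The corresponding scikit-learn configuration is a one-liner; everything except the number of boosting iterations stays at its default (a sketch of the setup described above):\n\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n# 600 boosting iterations; all other hyperparameters left at their defaults.\n# The classifier is fit on per-option feature vectors, and at test time the\n# option with the highest predicted correctness score is selected.\nclf = GradientBoostingClassifier(n_estimators=600)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Preprocessing and Training Details",

"sec_num": "4.5"

},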
|
{ |
|
"text": "We perform several runs of machine learning models (Section 4.3 and Section 4.4) with randomness introduced by different random seeds and/or GPU non-determinism and select the model or models (for ensemble) that perform best on the development set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing and Training Details", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "We implement several baselines, including rulebased methods and state-of-the-art neural models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "\u2022 Word Matching This strong baseline (Yih et al., 2013 ) selects the answer option that has the highest count of overlapping words with the given dialogue.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 54, |
|
"text": "(Yih et al., 2013", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
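A minimal sketch of this baseline (our illustration; overlap is counted over word types here, and counting tokens instead would be an equally plausible reading):

```python
def word_matching(dialogue_tokens, options):
    """Return the index of the option sharing the most words with the
    dialogue; `options` is a list of token lists, one per answer option."""
    dialogue_vocab = {tok.lower() for tok in dialogue_tokens}

    def overlap(option_tokens):
        return len({tok.lower() for tok in option_tokens} & dialogue_vocab)

    return max(range(len(options)), key=lambda i: overlap(options[i]))
```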
|
{ |
|
"text": "\u2022 Sliding Window We implement the sliding window approach (i.e., arg max i sw * i ) and its distance-based variation DSW (i.e., (Richardson et al., 2013) introduced in Section 4.2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 153, |
|
"text": "(Richardson et al., 2013)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
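A simplified sketch of the window score sw*_i (following Richardson et al. (2013) in spirit; the inverse-word-count weighting and the fixed window length are our simplifications, so treat this as illustrative only). The DSW variation subtracts a normalized question-to-option word distance d*_i from this score before taking the argmax.

```python
import math
from collections import Counter

def sliding_window_score(passage_tokens, target_tokens, window=10):
    """Best window score: sum of inverse-count weights of target words
    appearing in any fixed-length window over the passage."""
    counts = Counter(passage_tokens)
    ic = {w: math.log(1.0 + 1.0 / counts[w]) for w in counts}  # rarer = heavier
    targets = set(target_tokens)
    best = 0.0
    for start in range(max(1, len(passage_tokens) - window + 1)):
        span = passage_tokens[start:start + window]
        best = max(best, sum(ic[w] for w in span if w in targets))
    return best

# Hypothetical usage: score each option against the dialogue with the target
# set equal to the union of question and option tokens, then take the argmax.
```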
|
|
{ |
|
"text": "\u2022 Enhanced Distance-Based Sliding Window (DSW++) We also use general world knowledge and speaker-focused information to improve the original sliding window baseline, formulated in Expression 8 (Section 4.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "\u2022 Stanford Attentive Reader This neural baseline compares each candidate answer (i.e., entity) representation to the question-aware document representation built with attention mechanism (Hermann et al., 2015; . Lai et al. (2017) add a bilinear operation to compare document and answer option representations to answer multiple-choice questions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 209, |
|
"text": "(Hermann et al., 2015;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 229, |
|
"text": "Lai et al. (2017)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
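A minimal sketch of the bilinear comparison used to score answer options against the question-aware document vector (the dimension and tensor names are ours):

```python
import torch
import torch.nn as nn

class BilinearScorer(nn.Module):
    """score(option o) = o^T W d, for a question-aware document vector d."""
    def __init__(self, d_opt: int, d_doc: int):
        super().__init__()
        self.W = nn.Parameter(torch.empty(d_opt, d_doc))
        nn.init.xavier_uniform_(self.W)

    def forward(self, doc_vec, option_vecs):
        # doc_vec: (d_doc,); option_vecs: (num_options, d_opt).
        return option_vecs @ self.W @ doc_vec  # (num_options,) scores
```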
|
{ |
|
"text": "\u2022 Gated-Attention Reader The baseline models multiplicative question-specific document representations based on a gated-attention mechanism (Dhingra et al., 2017) , which are then compared to each answer option (Lai et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 162, |
|
"text": "(Dhingra et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 229, |
|
"text": "(Lai et al., 2017)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
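A minimal sketch of one gated-attention layer (our illustration of the multiplicative interaction; the GA Reader of Dhingra et al. (2017) stacks several such layers with recurrent encoders in between):

```python
import torch

def gated_attention(doc, query):
    """Multiplicatively gate each document token by its attended query summary.

    doc: (doc_len, d); query: (query_len, d). Returns (doc_len, d).
    """
    alpha = torch.softmax(doc @ query.t(), dim=-1)  # (doc_len, query_len)
    query_summary = alpha @ query                   # (doc_len, d)
    return doc * query_summary                      # element-wise gating
```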
|
{ |
|
"text": "\u2022 Co-Matching This state-of-the-art multiplechoice reading comprehension model explicitly treats question and answer option as two sequences and jointly matches them against a given document (Wang et al., 2018b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 211, |
|
"text": "(Wang et al., 2018b)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "\u2022 Finetuned Transformer LM This is a general task-agnostic model introduced in Section 4.4, which achieves the best reported performance on several tasks requiring multisentence reasoning (Radford et al., 2018) . (Yih et al., 2013) 41.7 42.0 Sliding Window (SW) (Richardson et al., 2013) 42.6 42.5 Distance-Based Sliding Window (DSW) (Richardson et al., 2013) 44.4 44.6", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 210, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 231, |
|
"text": "(Yih et al., 2013)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 287, |
|
"text": "(Richardson et al., 2013)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 359, |
|
"text": "(Richardson et al., 2013)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Stanford Attentive Reader (SAR) 40.2 39.8 Gated-Attention Reader (GAR) (Dhingra et al., 2017) 40.5 41.3 Co-Matching (CO) (Wang et al., 2018b) 45.6 45.5 Finetuned Transformer LM (FTLM) (Radford et al., 2018) 55.9 55.5", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 93, |
|
"text": "(Dhingra et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 141, |
|
"text": "(Wang et al., 2018b)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 206, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Our Approaches: We do not investigate other ways of leveraging pre-trained deep models such as adding ELMo representations (Peters et al., 2018) as additional features to a neural model since recent studies show that directly fine-tuning a pre-trained language model such as FTLM is significantly superior on multiple-choice reading comprehension tasks (Radford et al., 2018; Chen et al., 2019) . We do not apply more recent extractive models such as AOA (Cui et al., 2017) and QANet (Yu et al., 2018) since they aim at precisely locating a span in a document. When adapted to solve questions with abstractive answer options, extractive models generally tend to perform less well Dhingra et al., 2017; Lai et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 144, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 375, |
|
"text": "(Radford et al., 2018;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 394, |
|
"text": "Chen et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 473, |
|
"text": "(Cui et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 501, |
|
"text": "(Yu et al., 2018)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 701, |
|
"text": "Dhingra et al., 2017;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 702, |
|
"end": 719, |
|
"text": "Lai et al., 2017)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
|
|
{ |
|
"text": "We report the performance of the baselines introduced in Section 5.1 and our proposed approaches in Table 8 . We report the averaged accuracy of two annotators as the human performance. The proportion of valid questions (i.e., an unambiguous question with a unique correct answer option provided) that are manually checked by annotators on the annotated test and development sets is regarded as the human ceiling performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 107, |
|
"text": "Table 8", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Surface matching is insufficient. Experimental results show that neural models that primarily exploit surface-level information (i.e., SAR, GAR, and CO) attain a performance level close to that of simple rule-based approaches (i.e., WM, SW, and DSW). The highest accuracy achieved by CO is 45.5%, a similar level of performance to the rule-based method DSW (44.6%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "It is helpful to incorporate general world knowledge and dialogue structure. We see a significant gain of 5.5% in accuracy when enhancing DSW using general world knowledge from ConceptNet embeddings and considering speakerfocused information (Section 4.2). FTLM, which leverages rich external linguistic knowledge from thousands of books, already achieves a much higher accuracy (55.5%) compared with previous state-of-the-art machine comprehension models, indicating the effectiveness of general world knowledge. Experimental results show that our best single model FTLM++ significantly outperforms FTLM (p-value = 0.03), illustrating the usefulness of additional dialogue structure. Compared with the state-of-the-art neural reader Co-Matching that primarily explores surface-level information (45.5%), the tacit general world knowledge (in the pre-trained language model) and dialogue structure in FTLM++ lead to an absolute gain of 11.9% in accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Ensembling different types of methods can bring further improvements. We use the majority vote strategy to obtain the ensemble model performance. Although GBDT++ (52.8%) itself does not outperform FTLM++, GBDT++ can serve as a supplement to FTLM++ because they leverage different types of general world knowledge and model architectures. We achieve the highest accuracy (59.5%) by ensembling one GBDT++ and three FTLM++.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.2" |
|
}, |
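A minimal sketch of the majority vote used above (ours; tie handling is not specified in the text, and `Counter` breaks ties by first-encountered order here):

```python
from collections import Counter

def majority_vote(predictions):
    """predictions: per-model predicted option indices for one question."""
    return Counter(predictions).most_common(1)[0][0]

# Hypothetical usage for the best ensemble above (one GBDT++, three FTLM++):
# answer = majority_vote([gbdt_pred, ftlm_pred_1, ftlm_pred_2, ftlm_pred_3])
```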
|
{ |
|
"text": "We conduct ablation tests to evaluate the individual components of our proposed approaches (Table 9 ). In Table 10 , we summarize the involved types of dialogue structure and general world knowledge in our approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 99, |
|
"text": "(Table 9", |
|
"ref_id": "TABREF14" |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 114, |
|
"text": "Table 10", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Tests", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Dialogue Structure Specifically, we observe 1.4% drop in accuracy if we set the target speaker s Q to * for all questions when we apply DSW++. We observe a similar performance drop when we remove speaker-focused features from GBDT++. In addition, removing speaker embeddings from FTLM++ leads to a 1.7% drop in accuracy (in this case, the model becomes the original FTLM). These results consistently indicate the usefulness of dialogue structure for dialogue understanding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Tests", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We also investigate the effects of general world knowledge. After all, compared with health, taste is not so important.'' Moreover, if we train FTLM++ with randomly initialized transformer weights instead of weights pre-trained on the external corpus, the accuracy drops dramatically to 36.2%, which is only slightly better than a random baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "General World Knowledge", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Impact of Longer Turns The number of dialogue turns has a significant impact on the performance of FTLM++. As shown in Figure 2 , its performance reaches the peak when the number of turns ranges from 0 to 10, while it suffers severe performance drops when the given dialogue contains more turns. Both DSW++ (56.8%) and GBDT++ (57.4%) outperform FTLM++ (55.7%) when the number of turns ranges from 10 to 48. To deal with lengthy context, it may be helpful to first identify relevant sentences based on a question and its associated answer options rather than using the entire dialogue context as input.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 127, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Impact of Confusing Distractors For 54.5% of questions on the development set, the fuzzy matching score (Sikes, 2007) of at least one distractor answer option against the dialogue is higher than the score of the correct answer option. For questions that all models (i.e., DSW++, GBDT++, and FTLM++) fail to answer correctly, 73.0% of them contain at least one such confusing distractor answer option. The causes of this kind of errors can be roughly divided into two categories. First, the distractor is wrongly associated with the target speaker/s mentioned in the question (e.g., answer option A and C in D2-Q3 in Table 3 ). Second, although the claim in the distractor is supported by the dialogue, it is irrelevant to the question (e.g., D1-Q1-B in Table 1 ). A promising direction to solve this problem could be the construction of speaker-focused event chains (Chambers and Jurafsky, 2008) and advanced dialogue-specific coreference resolution systems for more reliable evidence context detection in a dialogue.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 117, |
|
"text": "(Sikes, 2007)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 866, |
|
"end": 895, |
|
"text": "(Chambers and Jurafsky, 2008)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 616, |
|
"end": 623, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 753, |
|
"end": 760, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "5.4" |
|
}, |
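The exact fuzzy matching score is not reproduced above; a ratio-based approximation (our assumption, standing in for the score of Sikes (2007)) could be computed as follows:

```python
from difflib import SequenceMatcher

def fuzzy_score(option: str, dialogue: str) -> float:
    """Approximate string similarity in [0, 1]; a stand-in for the
    fuzzy matching score, whose exact formula is not given above."""
    return SequenceMatcher(None, option.lower(), dialogue.lower()).ratio()

def has_confusing_distractor(options, answer_idx, dialogue):
    """True if some distractor matches the dialogue better than the answer."""
    answer_score = fuzzy_score(options[answer_idx], dialogue)
    return any(fuzzy_score(opt, dialogue) > answer_score
               for i, opt in enumerate(options) if i != answer_idx)
```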
|
{ |
|
"text": "We further report the performance of the best single model FTLM++ and the GBDT++ baseline on the categories defined in Section 3.2 (Table 11) . Not surprisingly, both models perform worse than random guessing on math problems. While most of the math problems can be solved by one single linear equation, it is still difficult to apply recent neural math word problem solvers (Huang et al., 2018; Wang et al., 2018a) due to informal dialoguebased problem descriptions and the requirement of commonsense inference. For example, given the dialogue: ''W: The plane arrives at 10:50. It is already 10:40 now. Be quick! M: Relax. Your watch must be fast. There are still twenty minutes left.'' We need prior knowledge to infer that the watch of the man is showing incorrect time 10:40. Instead, 10:50 should be used as the reference time with the time interval ''twenty minutes left'' together to answer the question ''What time is it now?'' Results show that GBDT++ is superior to the fine-tuned language model on the questions under the category matching (68.1% vs. 57.0%) and the latter model is more capable of answering implicit questions (e.g., under the category summary, logic, and commonsense) which require aggregation of information from multiple sentences, the understanding of the entire dialogue, or the utilization of world knowledge. Therefore, it might be useful to leverage the strengths of individual models to solve different types of questions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 375, |
|
"end": 395, |
|
"text": "(Huang et al., 2018;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 415, |
|
"text": "Wang et al., 2018a)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 141, |
|
"text": "(Table 11)", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Question Types", |
|
"sec_num": null |
|
}, |
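Concretely, the required inference in the watch example is a single subtraction once the correct reference time is chosen (our worked reading of the example):

```latex
t_{\text{now}} \;=\; t_{\text{arrival}} - \Delta t
            \;=\; 10\!:\!50 \;-\; 0\!:\!20 \;=\; 10\!:\!30 ,
```

so the answer is 10:30, not the 10:40 shown on the fast watch.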
|
{ |
|
"text": "We present DREAM, the first multiple-choice dialogue-based reading comprehension data set from English language examinations. Besides the multi-turn multi-party dialogue context, 85% of questions require multiple-sentence reasoning, and 34% of questions also require commonsense knowledge, making this task very challenging. We apply several popular reading comprehension models and find that surface-level information is insufficient. We incorporate general world knowledge and dialogue structure into rule-based and machine learning methods and show the effectiveness of these factors, suggesting a promising direction for dialogue-based reading comprehension. For future work, we are interested in problem generation for dialogues and investigating whether it will lead to more gains to pre-train a deep language model such as FTLM over large-scale dialogues from movies and TV shows instead of the BookCorpus data set (Zhu et al., 2015) used by previous work (Radford et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 922, |
|
"end": 940, |
|
"text": "(Zhu et al., 2015)", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 963, |
|
"end": 985, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://app.grammarly.com.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the editors and anonymous reviewers for their helpful feedback. We also thank Hai Wang from Toyota Technological Institute at Chicago for useful discussions and valuable comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A unified and general framework for argumentation-based negotiation", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Amgoud", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the AAMAS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Amgoud, Yannis Dimopoulos, and Pavlos Moraitis. 2007. A unified and general frame- work for argumentation-based negotiation. In Proceedings of the AAMAS, pages 1-8. New York, NY, USA.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Embracing data abundance: Booktest data set for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Ondrej", |
|
"middle": [], |
|
"last": "Bajgar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rudolf", |
|
"middle": [], |
|
"last": "Kadlec", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ondrej Bajgar, Rudolf Kadlec, and Jan Kleindienst. 2016. Embracing data abundance: Booktest data set for reading comprehension. CoRR, cs.CL/1610.00956v1.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "NLTK: the natural language toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the ACL on Interactive poster and demonstration sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "31--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Bird and Edward Loper. 2004. NLTK: the natural language toolkit. In Proceedings of the ACL on Interactive poster and demonstration sessions, pages 31-34. Barcelona, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Unsupervised learning of narrative event chains", |
|
"authors": [ |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "789--797", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nathanael Chambers and Dan Jurafsky. 2008. Unsupervised learning of narrative event chains. In Proceedings of the ACL, pages 789-797. Columbus, OH.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A thorough examination of the CNN/Daily Mail reading comprehension task", |
|
"authors": [ |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Bolton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2358--2367", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danqi Chen, Jason Bolton, and Christopher D. Manning. 2016. A thorough examination of the CNN/Daily Mail reading comprehension task. In Proceedings of the ACL, pages 2358-2367. Berlin, Germany.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Character identification on multiparty conversation: Identifying mentions of characters in TV shows", |
|
"authors": [ |
|
{ |
|
"first": "Yu-Hsin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinho", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the SIGDial", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "90--100", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu-Hsin Chen and Jinho D. Choi. 2016. Character identification on multiparty conversation: Iden- tifying mentions of characters in TV shows. In Proceedings of the SIGDial, pages 90-100. Los Angeles, CA.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Convolutional spatial attention model for reading comprehension with multiple-choice questions", |
|
"authors": [ |
|
{ |
|
"first": "Zhipeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wentao", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shijin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoping", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhipeng Chen, Yiming Cui, Wentao Ma, Shijin Wang, and Guoping Hu. 2019. Convolutional spatial attention model for reading compre- hension with multiple-choice questions. In Proceedings of the AAAI. Honolulu, HI.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "QuAC: Question answering in context", |
|
"authors": [ |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2174--2184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen-tau Yih, Yejin Choi, Percy Liang, and Luke Zettlemoyer. 2018. QuAC: Question an- swering in context. pages 2174-2184. Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Combining retrieval, statistics, and inference to answer elementary science questions", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oyvind", |
|
"middle": [], |
|
"last": "Tafjord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Turney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Khashabi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2580--2586", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Clark, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Oyvind Tafjord, Peter D. Turney, and Daniel Khashabi. 2016. Combining re- trieval, statistics, and inference to answer ele- mentary science questions. In Proceedings of the AAAI, pages 2580-2586. Phoenix, AZ.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Attentionover-attention neural networks for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhipeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Si", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shijin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoping", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "593--602", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiming Cui, Zhipeng Chen, Si Wei, Shijin Wang, Ting Liu, and Guoping Hu. 2017. Attention- over-attention neural networks for reading comprehension. In Proceedings of the ACL, pages 593-602. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Gated-attention readers for text comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Bhuwan", |
|
"middle": [], |
|
"last": "Dhingra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanxiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1832--1846", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhuwan Dhingra, Hanxiao Liu, Zhilin Yang, William Cohen, and Ruslan Salakhutdinov. 2017. Gated-attention readers for text com- prehension. In Proceedings of the ACL, pages 1832-1846. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "SearchQA: A new Q&A data set augmented with context from a search engine", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Dunn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Levent", |
|
"middle": [], |
|
"last": "Sagun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Higgins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volkan", |
|
"middle": [], |
|
"last": "Ugur Guney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cirik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Dunn, Levent Sagun, Mike Higgins, V Ugur Guney, Volkan Cirik, and Kyunghyun Cho. 2017. SearchQA: A new Q&A data set augmented with context from a search engine. CoRR, cs.CL/1704.05179v3.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Verb physics: Relative physical knowledge of actions and objects", |
|
"authors": [ |
|
{ |
|
"first": "Maxwell", |
|
"middle": [], |
|
"last": "Forbes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "266--276", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maxwell Forbes and Yejin Choi. 2017. Verb physics: Relative physical knowledge of actions and objects. In Proceedings of the ACL, pages 266-276. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Teaching machines to read and comprehend", |
|
"authors": [ |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Moritz Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Kocisky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lasse", |
|
"middle": [], |
|
"last": "Espeholt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Kay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mustafa", |
|
"middle": [], |
|
"last": "Suleyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1693--1701", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In Proceed- ings of the NIPS, pages 1693-1701. Montreal, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "The goldilocks principle: Reading children's books with explicit memory representations", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the ICLR. Caribe", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Hill, Antoine Bordes, Sumit Chopra, and Jason Weston. 2016. The goldilocks principle: Reading children's books with explicit memory representations. In Proceedings of the ICLR. Caribe Hilton, Puerto Rico.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Neural math word problem solver with reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Danqing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "213--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danqing Huang, Jing Liu, Chin-Yew Lin, and Jian Yin. 2018. Neural math word problem solver with reinforcement learning. In Proceedings of the COLING, pages 213-223. Santa Fe, NM.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "TriviaQA: A large scale distantly supervised challenge data set for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Eunsol Choi, Daniel S. Weld, and Luke Zettlemoyer. 2017. TriviaQA: A large scale distantly supervised challenge data set for reading comprehension. CoRR, cs.CL/1705.03551v2.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Looking beyond the surface: A challenge set for reading comprehension over multiple sentences", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Khashabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Snigdha", |
|
"middle": [], |
|
"last": "Chaturvedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shyam", |
|
"middle": [], |
|
"last": "Upadhyay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "252--262", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Khashabi, Snigdha Chaturvedi, Michael Roth, Shyam Upadhyay, and Dan Roth. 2018. Looking beyond the surface: A challenge set for reading comprehension over multiple sentences. In Proceedings of the NAACL-HLT, pages 252-262. New Orleans, LA.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The narrativeqa reading comprehension challenge", |
|
"authors": [ |
|
{ |
|
"first": "Tom\u00e1\u0161", |
|
"middle": [], |
|
"last": "Ko\u010disk\u1ef3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Schwarz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karl", |
|
"middle": [ |
|
"Moritz" |
|
], |
|
"last": "Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00e1abor", |
|
"middle": [], |
|
"last": "Melis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association of Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "317--328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom\u00e1\u0161 Ko\u010disk\u1ef3, Jonathan Schwarz, Phil Blunsom, Chris Dyer, Karl Moritz Hermann, G\u00e1abor Melis, and Edward Grefenstette. 2018. The narrativeqa reading comprehension challenge. Transactions of the Association of Computational Linguistics, 6:317-328.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "RACE: Largescale reading comprehension data set from examinations", |
|
"authors": [ |
|
{ |
|
"first": "Guokun", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qizhe", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanxiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "785--794", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. 2017. RACE: Large- scale reading comprehension data set from examinations. In Proceedings of the EMNLP, pages 785-794. Copenhagen, Denmark.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "ODSQA: Open-domain spoken question answering data set", |
|
"authors": [ |
|
{ |
|
"first": "Chia-Hsuan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shang-Ming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan-Cheng", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yi", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chia-Hsuan Lee, Shang-Ming Wang, Huan-Cheng Chang, and Hung-Yi Lee. 2018. ODSQA: Open-domain spoken question answering data set. CoRR, cs.CL/1808.02280v1.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Spoken SQuAD: A study of mitigating the impact of speech recognition errors on listening comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Chia-Hsuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Szu-Lin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi-Liang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yi", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chia-Hsuan Li, Szu-Lin Wu, Chi-Liang Liu, and Hung-yi Lee. 2018. Spoken SQuAD: A study of mitigating the impact of speech recognition errors on listening comprehension. CoRR, cs.CL/1804.00320v1.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Generating Wikipedia by summarizing long sequences", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Etienne", |
|
"middle": [], |
|
"last": "Saleh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Pot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Goodrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Sepassi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter J Liu, Mohammad Saleh, Etienne Pot, Ben Goodrich, Ryan Sepassi, Lukasz Kaiser, and Noam Shazeer. 2018. Generating Wikipedia by summarizing long sequences. In Proceedings of the ICLR. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Types of common-sense knowledge needed for recognizing textual entailment", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Lobue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Yates", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "329--334", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter LoBue and Alexander Yates. 2011. Types of common-sense knowledge needed for rec- ognizing textual entailment. In Proceedings of the ACL, pages 329-334. Portland, OR.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Challenging reading comprehension on daily conversation: Passage completion on multiparty dialog", |
|
"authors": [ |
|
{ |
|
"first": "Kaixin", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Jurczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinho", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2039--2048", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaixin Ma, Tomasz Jurczyk, and Jinho D. Choi. 2018. Challenging reading comprehension on daily conversation: Passage completion on multi- party dialog. In Proceedings of the NAACL- HLT, pages 2039-2048. New Orleans, LA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Can a suit of armor conduct electricity? A new data set for open book question answering", |
|
"authors": [ |
|
{ |
|
"first": "Todor", |
|
"middle": [], |
|
"last": "Mihaylov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Todor Mihaylov, Peter Clark, Tushar Khot, and Ashish Sabharwal. 2018. Can a suit of armor conduct electricity? A new data set for open book question answering. In Proceedings of the EMNLP. Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A corpus and evaluvation framework for deeper understanding of commonsense stories", |
|
"authors": [ |
|
{ |
|
"first": "Nasrin", |
|
"middle": [], |
|
"last": "Mostafazadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vanderwende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushmeet", |
|
"middle": [], |
|
"last": "Kohli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Allen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "839--849", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. 2016. A corpus and evaluvation framework for deeper understanding of commonsense stories. In Proceedings of the NAACL-HLT, pages 839-849. San Diego, CA.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "How large a vocabulary is needed for reading and listening? Canadian Modern Language Review", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nation", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "63", |
|
"issue": "", |
|
"pages": "59--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I Nation. 2006. How large a vocabulary is needed for reading and listening? Canadian Modern Language Review, 63:59-82.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "MS MARCO: A human generated machine reading comprehension data set", |
|
"authors": [ |
|
{ |
|
"first": "Tri", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mir", |
|
"middle": [], |
|
"last": "Rosenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xia", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saurabh", |
|
"middle": [], |
|
"last": "Tiwary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rangan", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tri Nguyen, Mir Rosenberg, Xia Song, Jianfeng Gao, Saurabh Tiwary, Rangan Majumder, and Li Deng. 2016. MS MARCO: A human gen- erated machine reading comprehension data set. CoRR, cs.CL/1611.09268v2.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Who did What: A large-scale person-centered cloze data set", |
|
"authors": [ |
|
{ |
|
"first": "Takeshi", |
|
"middle": [], |
|
"last": "Onishi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mcallester", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2230--2235", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takeshi Onishi, Hai Wang, Mohit Bansal, Kevin Gimpel, and David McAllester. 2016. Who did What: A large-scale person-centered cloze data set. In Proceedings of the EMNLP, pages 2230-2235. Austin, TX.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "SemEval-2018 Task 11: Machine comprehension using commonsense knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Ostermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Modi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the SemEval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "747--757", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Ostermann, Michael Roth, Ashutosh Modi, Stefan Thater, and Manfred Pinkal. 2018. SemEval-2018 Task 11: Machine compre- hension using commonsense knowledge. In Proceedings of the SemEval, pages 747-757. New Orleans, LA.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Scikitlearn: Machine learning in python", |
|
"authors": [ |
|
{ |
|
"first": "Fabian", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ga\u00ebl", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertrand", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathieu", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jake", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabian Pedregosa, Ga\u00ebl Varoquaux, Alexandre Gramfort, Vincent Michel, Bertrand Thirion, Olivier Grisel, Mathieu Blondel, Peter Prettenhofer, Ron Weiss, Vincent Dubourg, Jake Vanderplas, Alexandre Passos, David Cournapeau, Matthieu Brucher, Matthieu Perrot, and Eduard Duchesnay. 2011. Scikit- learn: Machine learning in python. Journal of Machine Learning Research, 12:2825-2830.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Overview of CLEF QA Entrance Exams Task", |
|
"authors": [ |
|
{ |
|
"first": "Anselmo", |
|
"middle": [], |
|
"last": "Penas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Miyao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alvaro", |
|
"middle": [], |
|
"last": "Rodrigo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Eduard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noriko", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kando", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the CLEF", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1194--1200", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anselmo Penas, Yusuke Miyao, Alvaro Rodrigo, Eduard H Hovy, and Noriko Kando. 2014. Overview of CLEF QA Entrance Exams Task 2014. In Proceedings of the CLEF, pages 1194-1200. Sheffield, UK.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contex- tualized word representations. In Proceed- ings of the NAACL-HLT, pages 2227-2237, New Orleans, LA.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Improving language understanding by generative pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
},
{
"first": "Tim",
"middle": [],
"last": "Salimans",
"suffix": ""
},
{
"first": "Ilya",
"middle": [],
"last": "Sutskever",
"suffix": ""
}
],
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding by generative pre-training. Preprint, available at https://openai.com/blog/language- unsupervised/.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "questions for machine comprehension of text", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2383--2392", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "questions for machine comprehension of text. In Proceedings of the EMNLP, pages 2383-2392.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "CoQA: A conversational question answering challenge", |
|
"authors": [ |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siva Reddy, Danqi Chen, and Christopher D. Manning. 2018. CoQA: A conversational ques- tion answering challenge. CoRR, cs.CL/1808. 07042v1.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "MCTest: A challenge data set for the open-domain machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erin", |
|
"middle": [], |
|
"last": "Burges", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Renshaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "193--203", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Richardson, Christopher JC Burges, and Erin Renshaw. 2013. MCTest: A challenge data set for the open-domain machine comprehen- sion of text. In Proceedings of the EMNLP, pages 193-203. Seattle, WA.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Overview of the NTCIR-11 QA-Lab Task", |
|
"authors": [ |
|
{ |
|
"first": "Hideyuki", |
|
"middle": [], |
|
"last": "Shibuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kotaro", |
|
"middle": [], |
|
"last": "Sakamoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshinobu", |
|
"middle": [], |
|
"last": "Kano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teruko", |
|
"middle": [], |
|
"last": "Mitamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madoka", |
|
"middle": [], |
|
"last": "Ishioroshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Kelly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Itakura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tatsunori", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noriko", |
|
"middle": [], |
|
"last": "Mori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kando", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "NTCIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hideyuki Shibuki, Kotaro Sakamoto, Yoshinobu Kano, Teruko Mitamura, Madoka Ishioroshi, Kelly Y Itakura, Di Wang, Tatsunori Mori, and Noriko Kando. 2014. Overview of the NTCIR-11 QA-Lab Task. In NTCIR.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Fuzzy matching in theory and practice", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Sikes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Multilingual", |
|
"volume": "18", |
|
"issue": "6", |
|
"pages": "39--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Sikes. 2007. Fuzzy matching in theory and practice. Multilingual, 18(6):39-43.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "ConceptNet 5.5: An Open Multilingual Graph of General Knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Robyn", |
|
"middle": [], |
|
"last": "Speer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Chin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Catherine", |
|
"middle": [], |
|
"last": "Havasi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4444--4451", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robyn Speer, Joshua Chin, and Catherine Havasi. 2017. ConceptNet 5.5: An Open Multilingual Graph of General Knowledge. In Proceedings of the AAAI, pages 4444-4451. San Francisco, CA.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "All who wander: On the prevalence and characteristics of multi-community engagement", |
|
"authors": [ |
|
{ |
|
"first": "Chenhao", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the WWW", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1056--1066", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenhao Tan and Lillian Lee. 2015. All who wander: On the prevalence and characteristics of multi-community engagement. In Proceed- ings of the WWW, pages 1056-1066. Florence, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Multi-range reasoning for machine comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Tay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luu", |
|
"middle": [ |
|
"Anh" |
|
], |
|
"last": "Tuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siu Cheung", |
|
"middle": [], |
|
"last": "Hui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Tay, Luu Anh Tuan, and Siu Cheung Hui. 2018. Multi-range reasoning for machine com- prehension. CoRR, cs.CL/1803.09074v1.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "NewsQA: A machine comprehension data set", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Trischler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingdi", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Bachman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaheer", |
|
"middle": [], |
|
"last": "Suleman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the RepL4NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "191--200", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Trischler, Tong Wang, Xingdi Yuan, Justin Harris, Alessandro Sordoni, Philip Bachman, and Kaheer Suleman. 2017. NewsQA: A machine comprehension data set. In Pro- ceedings of the RepL4NLP, pages 191-200. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Towards machine comprehension of spoken content: Initial toefl listening comprehension test by machine", |
|
"authors": [ |
|
{ |
|
"first": "Bo-Hsiang", |
|
"middle": [], |
|
"last": "Tseng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng-Syun", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yi", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin-Shan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo-Hsiang Tseng, Sheng-Syun Shen, Hung-Yi Lee, and Lin-Shan Lee. 2016. Towards machine comprehension of spoken content: Initial toefl listening comprehension test by machine. In Proceedings of the Interspeech. San Francisco, CA.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Translating a math word problem to a expression tree", |
|
"authors": [ |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deng", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongxiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojiang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1064--1069", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lei Wang, Yan Wang, Deng Cai, Dongxiang Zhang, and Xiaojiang Liu. 2018a. Translating a math word problem to a expression tree. In Proceedings of the EMNLP, pages 1064-1069. Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "A co-matching model for multichoice reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Shuohang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shiyu", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuohang Wang, Mo Yu, Shiyu Chang, and Jing Jiang. 2018b. A co-matching model for multi- choice reading comprehension. In Proceedings of the ACL, pages 1-6. Melbourne, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Question answering using enhanced lexical semantic models", |
|
"authors": [ |
|
{ |
|
"first": "Wen-tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Meek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrzej", |
|
"middle": [], |
|
"last": "Pastusiak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1744--1753", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen-tau Yih, Ming-Wei Chang, Christopher Meek, and Andrzej Pastusiak. 2013. Question answer- ing using enhanced lexical semantic models. In Proceedings of the ACL, pages 1744-1753. Sofia, Bulgaria.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "QANet: Combining local convolution with global self-attention for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Adams", |
|
"middle": [ |
|
"Wei" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Dohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. 2018. QANet: Combining local convolution with global self-attention for read- ing comprehension. In Proceedings of the ICLR. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Aligning books and movies: Towards story-like visual explanations by watching movies and reading books", |
|
"authors": [ |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE ICCV, pages 19-27. Santiago, Chile.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Overall neural network framework (Section 4.4)." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Performance comparison of different number of turns on the test set." |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "A sample DREAM problem that requires general world knowledge ( : the correct answer option)." |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Dialogue 2 (D2)</td></tr><tr><td>W: Hey, Mike. Where have you been? I didn't see</td></tr><tr><td>you around these days?</td></tr><tr><td>M: I was hiding in my office. My boss gave me</td></tr><tr><td>loads of work to do, and I tried to finish it</td></tr><tr><td>before my birthday. Anyway, I am done now.</td></tr><tr><td>Thank goodness! How is everything going</td></tr><tr><td>with you?</td></tr><tr><td>W: I'm quite well. You know, tomorrow is</td></tr><tr><td>Christmas Day. Do you have any plans?</td></tr><tr><td>M: Well, to tell you the truth, I am more</td></tr><tr><td>than excited about my birthday, which will</td></tr><tr><td>come in two days. I am going to visit my</td></tr><tr><td>parents-in-law with my wife.</td></tr><tr><td>W: Wow, sounds great.</td></tr><tr><td>M: Definitely! This is my first time to spend my</td></tr><tr><td>birthday with them.</td></tr><tr><td>W: Do they live far away from here?</td></tr><tr><td>M: A little bit. We planned to take the train, but</td></tr><tr><td>considering the travel peak, my wife strongly</td></tr><tr><td>suggested that we go to the airport right after</td></tr><tr><td>we finish our work this afternoon. How about</td></tr><tr><td>you? What's your holiday plan?</td></tr><tr><td>W: Well, our situations are just the opposite. My</td></tr><tr><td>parents-in-law will come to my house, and</td></tr><tr><td>they wish to stay at home and have a quiet</td></tr><tr><td>Christmas Day. So I have to call my friends to</td></tr><tr><td>cancel our party that will be held at my house.</td></tr><tr><td>Q1 What is the date of the man's birthday?</td></tr><tr><td>A. 25th, December.</td></tr><tr><td>B. 26th, December.</td></tr><tr><td>C. 27th, December.</td></tr><tr><td>Q2 How will the man go to his wife's parents'</td></tr><tr><td>home?</td></tr><tr><td>A. By train.</td></tr><tr><td>B. By bus.</td></tr><tr><td>C. By plane.</td></tr><tr><td>Q3 What is the probable relationship between the</td></tr><tr><td>two speakers?</td></tr><tr><td>A. Husband and wife.</td></tr><tr><td>B. Friends.</td></tr><tr><td>C. Parent-in-law and son-in-law.</td></tr></table>", |
|
"num": null, |
|
"text": "Thanks, the same to you!" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "A complete sample DREAM problem ( : the correct answer option)." |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Train</td><td>Dev</td><td>Test</td><td>All</td></tr><tr><td colspan=\"3\"># of dialogues 3,869 1,288 1,287</td><td>6,444</td></tr><tr><td colspan=\"4\"># of questions 6,116 2,040 2,041 10,197</td></tr></table>", |
|
"num": null, |
|
"text": "The overall statistics of DREAM. A turn is defined as an uninterrupted stream of speech from one speaker in a dialogue." |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "The separation of the training, development, and test sets in DREAM." |
|
}, |
|
"TABREF12": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Performance in accuracy (%) on the DREAM data set. Performance marked by is reported based on 25% annotated questions from the development and test sets." |
|
}, |
|
"TABREF14": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Ablation tests on the development set (%)." |
|
}, |
|
"TABREF16": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>as ''What do we learn about the man?'' whose</td></tr><tr><td>correct answer option ''He is health-conscious.''</td></tr><tr><td>is not explicitly mentioned in the source dialogue</td></tr><tr><td>''M: We had better start to eat onions frequently,</td></tr><tr><td>Linda. W: But you hate onions, don't you? M:</td></tr><tr><td>Until I learned from a report from today's paper</td></tr><tr><td>that they protect people from flu and colds.</td></tr></table>", |
|
"num": null, |
|
"text": "Types of dialogue structure and general world knowledge investigated in our approaches." |
|
} |
|
} |
|
} |
|
} |