|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:19:42.610444Z" |
|
}, |
|
"title": "ARES: A Reading Comprehension Ensembling Service", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Ferritto", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI Yorktown Heights", |
|
"location": { |
|
"region": "NY" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI Yorktown Heights", |
|
"location": { |
|
"region": "NY" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rishav", |
|
"middle": [], |
|
"last": "Chakravarti", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI Yorktown Heights", |
|
"location": { |
|
"region": "NY" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI Yorktown Heights", |
|
"location": { |
|
"region": "NY" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI Yorktown Heights", |
|
"location": { |
|
"region": "NY" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"William" |
|
], |
|
"last": "Murdock", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI Yorktown Heights", |
|
"location": { |
|
"region": "NY" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Avirup", |
|
"middle": [], |
|
"last": "Sil", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI Yorktown Heights", |
|
"location": { |
|
"region": "NY" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We introduce ARES (A Reading Comprehension Ensembling Service): a novel Machine Reading Comprehension (MRC) demonstration system which utilizes an ensemble of models to increase F1 by 2.3 points. While many of the top leaderboard submissions in popular MRC benchmarks such as the Stanford Question Answering Dataset (SQuAD) and Natural Questions (NQ) use model ensembles, the accompanying papers do not publish their ensembling strategies. In this work, we detail and evaluate various ensembling strategies using the NQ dataset. ARES leverages the CFO (Chakravarti et al., 2019) and Reac-tJS distributed frameworks to provide a scalable interactive Question Answering experience that capitalizes on the agreement (or lack thereof) between models to improve the answer visualization experience.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We introduce ARES (A Reading Comprehension Ensembling Service): a novel Machine Reading Comprehension (MRC) demonstration system which utilizes an ensemble of models to increase F1 by 2.3 points. While many of the top leaderboard submissions in popular MRC benchmarks such as the Stanford Question Answering Dataset (SQuAD) and Natural Questions (NQ) use model ensembles, the accompanying papers do not publish their ensembling strategies. In this work, we detail and evaluate various ensembling strategies using the NQ dataset. ARES leverages the CFO (Chakravarti et al., 2019) and Reac-tJS distributed frameworks to provide a scalable interactive Question Answering experience that capitalizes on the agreement (or lack thereof) between models to improve the answer visualization experience.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Machine Reading Comprension (MRC) involves computer systems that can take a question and some text and produce an answer to that question using the content in that text. This field has recently received considerable attention, yielding popular leaderboard challenges such as SQuAD (Rajpurkar et al., 2016 (Rajpurkar et al., , 2018 and NQ (Kwiatkowski et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 304, |
|
"text": "(Rajpurkar et al., 2016", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 305, |
|
"end": 330, |
|
"text": "(Rajpurkar et al., , 2018", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 364, |
|
"text": "(Kwiatkowski et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Currently, the top submissions on both the SQuAD and NQ leaderboards combine multiple system outputs. These ensembled systems traditionally outperform single models by 1-4 Fmeasure. Unfortunately, many of the papers for these systems provide little to no information about the ensembling techniques they use.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we use GAAMA, a prototype question-answering system using the MRC techniques of (Pan et al., 2019) , as our starting point and explore how to ensemble multiple MRC models from GAAMA 1 . We evaluate these techniques on the NQ short answer task. Using our ensemble of models, for each example (question, passage pair), we take the top predictions per system, group by span (answer extracted from the passage), normalize and aggregate the scores, take the mean score across systems for each span, and then take the highest scoring short and long answer spans as our final prediction. These improved ensembling techniques are applied to our MRC systems to produce stronger answers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 112, |
|
"text": "(Pan et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Whereas other systems such as (Chakravarti et al., 2019; Yang et al., 2019a) and Allen NLP's 2 make use of a single model, we are able to use multiple models to produce a stronger result. We further take advantage of the fact that both the individual model predictions and the ensembed predictions are returned to help increase explainability for the user. For the graphical interface we use a heatmap to show the level of (dis)agreement between the underlying models along with the \"best ensemble\" answer. An example of this can be seen in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 56, |
|
"text": "(Chakravarti et al., 2019;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 57, |
|
"end": 76, |
|
"text": "Yang et al., 2019a)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 541, |
|
"end": 549, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "More completely, our contributions include:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A novel MRC demonstration system, which leverages multiple underlying MRC model predictions and ensembles them for the user.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A system architecture that provides scalability to the system designer (by leveraging the cloud ready CFO 3 (Chakravarti et al., 2019) orchestration framework) and flexibility to add and remove models based on the desired latency versus accuracy trade-off. \u2022 A GUI with enhanced explainability that allows users to see the (dis)agreement of responses from individual models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 136, |
|
"text": "(Chakravarti et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 An analysis of various ensembling strategies with experimental results on the challenging NQ dataset which show that diversity of models is better for ensembling than seed variation. We detail the process for selecting the \"bestdiverse\" set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Related Work", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There have been multiple works creating systems utilizing MRC models. BERTserini (Yang et al., 2019a) is an end-to-end question answering system utilizing a BERT model. (Ma et al., 2019) creates an end-to-end dialogue tracking system featuring an XLNet (Yang et al., 2019b) model. (Qu et al., 2020) performs conversational question answering and utilizes separate ALBERT (Lan et al., 2019) encoders for the question and passage in addition to a BERT model. Allen NLP's MRC demo provides reading comprehension through the use of a variety of different model types. However, to the best of our knowledge we are the first to propose using an ensemble of MRC models to provide a MRC service. There have likewise been multiple approaches to visualization of system results. BertSerini highlights the answer in the context. Allen NLP's demo allows using gradients to view the most important words in the passage. ARES allows for viewing the most important regions of the passage from the perspective of different models in addition to boxing in the ensembled answer as seen in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 101, |
|
"text": "(Yang et al., 2019a)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 186, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 273, |
|
"text": "(Yang et al., 2019b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 298, |
|
"text": "(Qu et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 389, |
|
"text": "(Lan et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1071, |
|
"end": 1080, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ensembled MRC Systems", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Many of the top recent MRC systems publish few details on their ensembling strategies. Systems such as Liu et al., 2019; Wang et al., 2019; Lan et al., 2019; Group, 2017; Seo et al., 2016) report using ensembles of 5 to 18 models to gain 1.3 -4 F1 points on tasks such as GLUE, SQuAD 1.0, and SQuAD 2.0; unfortunately most of these systems report little information on their ensembling techniques. (Liu et al., 2020) reports slightly more information: gaining 1.8 and 0.6 F1 points short answer (SA) and long answer (LA) respectively on the NQ dev set with an ensemble of three models with different hyperparameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 120, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 139, |
|
"text": "Wang et al., 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 157, |
|
"text": "Lan et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 170, |
|
"text": "Group, 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 188, |
|
"text": "Seo et al., 2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 416, |
|
"text": "(Liu et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling Techniques", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We also consider work in the field of information retrieval (IR) as a way to aggregate multiple scores for the same span. Similar to the popular Comb-SUM and CombMNZ (Kurland and Culpepper, 2018; Wu, 2012) methods, considering the spans as the \"documents\", we use span-score weighted aggregation in our noisy-or aggregator. Futher, we additionally incorporate the use of rank-based scoring from Borda (Young, 1974) and RRF (Cormack et al., 2009) for our exponential sum approach (in addition to utilizing score for this approach). We finally consider a reciprocal rank sum aggregation strategy based on the ideas in RRF (Cormack et al., 2009) . To our knowledge this is the first published application of IR methods for this purpose.", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 195, |
|
"text": "(Kurland and Culpepper, 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 205, |
|
"text": "Wu, 2012)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 414, |
|
"text": "(Young, 1974)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 445, |
|
"text": "(Cormack et al., 2009)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 620, |
|
"end": 642, |
|
"text": "(Cormack et al., 2009)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling Techniques", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We describe the architecture of the system and additionally provide an overview of the client (GUI) used in this demonstration. The system is composed of MRC and ensembling services which are orchestrated by CFO. The MRC services (in our case GAAMA) provide reading comprehension via a transformer model (Pan et al., 2019) ; multiple services utilizing different model architectures are run to extract answers for a given question and passage. After the MRC services extract their answers, they are all passed to ARES which ensembles the results. The ensembling algorithm used by ARES is detailed in Sections 4 and 5. Note that the MRC service only extracts short answers, therefore only those portions of our ensembling approach are used.", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 322, |
|
"text": "(Pan et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Both the ensembled and original answers are then returned to the caller, allowing the clients to display the final ensembled answers and the original answers they were generated from to the end user.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "More completely, the system takes the following as input through a grpc (Talvar, 2016) interface: question, passage, minimum confidence score threshold \u03b4, maximum number of answers N , maximum number of answers per model n, and number of models k. These inputs are sent from the client (we discuss our client below) and received by the CFO node which orchestrates the containers. The choice of k is bounded on how many GAAMA containers are deployed (e.g. if there are 3 then k \u2208 {1, 2, 3}). By tweaking the parameter k, clients can opt for increased accuracy (higher k) or decreased latency (lower k) as when multiple models run on the same GPU the request latency increases. As depicted in Figure 2 (where there are 3 MRC models running), each of the k=2 GAAMA containers then receive the question and passage from CFO, returning at most n answers to CFO. These answers, together with their confidence scores, are then sent to the ensembler by CFO which produces at most N ensembled answers (each with confidence score at least \u03b4) and returns them to CFO. Finally, both the answers of the k models and the ensembled answers predicted by ARES are returned by CFO to the caller.", |
|
"cite_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 86, |
|
"text": "(Talvar, 2016)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 691, |
|
"end": 699, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
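
{

"text": "To make the request flow concrete, the following minimal Python sketch models the input parameters described above; the class and field names are our own illustrative choices, not the actual CFO/gRPC message definitions.\n\nfrom dataclasses import dataclass\n\n@dataclass\nclass AresRequest:\n    question: str\n    passage: str\n    min_score: float  # delta: minimum confidence threshold for returned answers\n    max_answers: int  # N: maximum number of ensembled answers to return\n    answers_per_model: int  # n: maximum answers returned by each MRC model\n    num_models: int  # k: number of GAAMA containers queried (accuracy vs. latency)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "System Overview",

"sec_num": "3"

},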
|
{ |
|
"text": "The GUI client for our system is based on a Reac-tJS 4 web interface. A request is taken as input from the user and sent to the system where is is processed as described above. When an answer with sufficient confidence score is returned, it is displayed to the user as seen in Figure 1 . Both the ensembled answer and the individual answers are shown together with their respective confidence scores. These answers are also shown in the context of the original passage. The ensembled answer is boxed in. For the individual answers a character heatmap is created representing how many of the candidate answers each character appears in. This heatmap is used to highlight the passage different different colors corresponding to the heatmap (characters not used in any answers are not highlighted). Both the boxing and highlighting of answers are done using MarkJS 5 . Note that while these visualizations only show the top answer for each MRC model, n answers per model are ensembled together. If Figure 2 : Architecture of the the ARES system. We use GAAMA as our MRC service.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 277, |
|
"end": 285, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 995, |
|
"end": 1003, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "an answer with sufficient confidence score is not returned, this is relayed to the user through the GUI.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
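
{

"text": "As a minimal sketch of the heatmap computation, assume each model's top answer is given as (start, end) character offsets into the passage; the actual client computes this in the ReactJS layer, and the helper name here is hypothetical.\n\ndef char_heatmap(passage_len, answer_spans):\n    # answer_spans: one (start, end) character-offset pair per model's top answer\n    counts = [0] * passage_len\n    for start, end in answer_spans:\n        for i in range(start, end):\n            counts[i] += 1  # counts[i] = number of answers covering character i\n    return counts",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "System Overview",

"sec_num": "3"

},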
|
{ |
|
"text": "We investigate a number of strategies for ensembling models on the NQ dataset. We use the NQ dataset as it is more realistic and challenging than SQuAD, as its questions were created by Google Search users prior to seeing the answer documents (so they do not suffer from observational bias). In order to formally compare approaches we partition the NQ dev set into \"dev-train\" and \"dev-test\" by taking the first three dev files for the \"train\" set and using the last two for the \"test\" set (the original dev set for NQ is partitioned into 5 files for distribution). This yields \"train\" and \"test\" sets of 4,653 and 3,177 examples respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For each strategy considered we search for the best k-model ensemble over the \"train\" set and then evaluate on the \"test\" set. For these experiments we use k = 4 as this is the number of models that we can decode in the 24 hours (the limit for the NQ leaderboard). We begin by outlining our core strategy that underlies the approaches we have investigated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Using this strategy we investigate a baseline approach of ensembling multiple versions of the same model trained with different seeds. We then investigate search strategies for choosing the best models from candidates trained with different hyperparameters, in addition to different normalization and aggregation strategies that are used on a set of candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For each example processed by the k systems being ensembled, our system assigns a score to each long and short span according to the normalization and aggregation strategies (see below). Note that our system currently only predicts single short spans rather than sets, so we currently score each short span independently.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core Strategy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We use the top-20 candidate long and short answers (LA and SA respectively) for each system. We experimented with additional values, but empirically found 20 to provide an ideal accuracy versus latency trade-off given hardware resources. To combine systems we take the arithmetic mean of the scores for each long and short span predicted by at least one system. We have experimented with other approaches such as median, geometric mean, and harmonic mean; however these are omitted here as they resulted in much lower scores than arithmetic mean. For spans which are only predicted by some systems a score of zero is assigned (for the systems which do not predict the span) to penalize spans which are only predicted by some systems. The predicted long span is then the span with the greatest arithmetic mean. Similarly for short answers the predicted span is the one with the greatest arithmetic mean (it must also be in a non-null long answer span).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core Strategy", |
|
"sec_num": "4.1" |
|
}, |
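
{

"text": "The core strategy can be sketched as follows, assuming each system's top-20 candidates arrive as a mapping from span to score; this helper is illustrative, not the released implementation.\n\nimport numpy as np\n\ndef ensemble_core(per_system):\n    # per_system: one dict per model (k total) mapping span (start, end) -> score\n    spans = set().union(*per_system)\n    means = {s: np.mean([model.get(s, 0.0) for model in per_system]) for s in spans}\n    # missing predictions contribute zero, penalizing spans predicted by only some models\n    return max(means, key=means.get)  # span with the greatest arithmetic mean",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Core Strategy",

"sec_num": "4.1"

},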
|
{ |
|
"text": "We first examine the baseline approach of ensembling k versions of the same model trained with the same hyperparameters, only varying the seed between models. We use the model with the best hyperparameters based on (Pan et al., 2019) having the highest sum of short and long answer F1 scores on dev. This model is trained k \u22121 additional times with different seeds and then they are all ensembled using the core strategy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 233, |
|
"text": "(Pan et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seed Ensembles", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We consider two main strategies when searching for ensembles: exhaustive and greedy. These search over model candidates with different hyperparameters as described in (Pan et al., 2019) . Note that we also considered a \"simple greedy\" approach where the k best models on dev were selected, however this underperformed other approaches by 1 -2 F1 points.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 185, |
|
"text": "(Pan et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Search Strategies", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In exhaustive search we consider all possible ensembles, whereas in greedy search we build the ensemble one model at a time by looking for which model we can add to an i model ensemble to make the best i + 1 model ensemble.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Search Strategies", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In the exhaustive search approach where we consider each of the m k ensembles of k candidates from our group of m models. We then use our core strategy for each ensemble to obtain short and long answer F1 scores for each ensemble. After searching all possible ensembles we return two ensembles: (i) the ensemble with the highest long answer F1 score and (ii) the ensemble with the highest short answer F1 score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exhaustive Search (ES)", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "We select the models by greedily building 1, 2, ..., k model ensembles optimizing for short or long answer F1 using our core strategy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Greedy Search (GS)", |
|
"sec_num": "4.3.2" |
|
}, |
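
{

"text": "Both searches can be sketched as follows, under the assumption that eval_f1(ensemble) scores a candidate ensemble on \"dev-train\" via the core strategy; both function names are ours, not from the released code.\n\nfrom itertools import combinations\n\ndef exhaustive_search(models, k, eval_f1):\n    # scores all (m choose k) ensembles and keeps the best\n    return max(combinations(models, k), key=eval_f1)\n\ndef greedy_search(models, k, eval_f1):\n    ensemble = []\n    for _ in range(k):  # grow the best i-model ensemble into the best i+1-model one\n        remaining = [m for m in models if m not in ensemble]\n        ensemble.append(max(remaining, key=lambda m: eval_f1(ensemble + [m])))\n    return ensemble",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Search Strategies",

"sec_num": "4.3"

},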
|
{ |
|
"text": "We investigate two primary methods for normalizing the scores predicted for a span: not normalizing and logistic regression. We also investigated normalizing by dividing the scores for a span by the sum of all scores for the span, however we omit these results for brevity as they did not produce interesting results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Normalization Strategies", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "As a baseline we run experiments where the scores for a span are used as-is.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "None", |
|
"sec_num": "4.4.1" |
|
}, |
|
{ |
|
"text": "We also experiment with normalization using logistic regression where the scores from the top prediction for the \"dev-train\" examples is used to predict whether the example is correctly answered. In our experiments using the top example performed equally well to using the top 20 predictions per example to train on. We also experimented with using other features which did not improve performance. To ensure an appropriate regularization strength is used, we use the scikit-learn (Pedregosa et al., 2011) implementation of logistic regression with stratified 5-fold cross-validation to select the L2 regularization strength.", |
|
"cite_spans": [ |
|
{ |
|
"start": 481, |
|
"end": 505, |
|
"text": "(Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logistic Regression", |
|
"sec_num": "4.4.2" |
|
}, |
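
{

"text": "A minimal sketch of this normalizer, assuming binary correctness labels for the top \"dev-train\" predictions; this mirrors, but is not, the released code.\n\nfrom sklearn.linear_model import LogisticRegressionCV\n\ndef fit_normalizer(top_scores, correct):\n    # top_scores: raw score of each system's top prediction on dev-train\n    # correct: 1 if that example was answered correctly, else 0\n    X = [[s] for s in top_scores]\n    clf = LogisticRegressionCV(cv=5).fit(X, correct)  # stratified 5-fold CV selects the L2 strength\n    return lambda score: clf.predict_proba([[score]])[0, 1]  # normalized score in [0, 1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Logistic Regression",

"sec_num": "4.4.2"

},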
|
{ |
|
"text": "We consider a number of aggregation strategies to produce a single span score for each span predicted by a system for an example. These include the baseline approach of max as well as the exponentially decaying sum, reciprocal rank sum, and noisy-or methods influenced by IR. These approaches operate on a vector P of scores on which one of the above normalization strategies has been applied.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aggregation Strategies", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "For a vector P , score = max", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Max", |
|
"sec_num": "4.5.1" |
|
}, |
|
{ |
|
"text": "|P | i=1 P i . 4.5.2 Exponential Sum (ExS)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Max", |
|
"sec_num": "4.5.1" |
|
}, |
|
{ |
|
"text": "Based on the ideas of (Young, 1974; Cormack et al., 2009) , we sort P in descending order and take score =", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 35, |
|
"text": "(Young, 1974;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 36, |
|
"end": 57, |
|
"text": "Cormack et al., 2009)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Max", |
|
"sec_num": "4.5.1" |
|
}, |
|
{ |
|
"text": "|P | i=1 P i * \u03b2 i\u22121", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Max", |
|
"sec_num": "4.5.1" |
|
}, |
|
{ |
|
"text": "for some constant \u03b2 (we use \u03b2 = 0.5).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Max", |
|
"sec_num": "4.5.1" |
|
}, |
|
{ |
|
"text": "Based on the ideas of (Cormack et al., 2009) , we sort P in descending order and take score =", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 44, |
|
"text": "(Cormack et al., 2009)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reciprocal Rank Sum (RRS)", |
|
"sec_num": "4.5.3" |
|
}, |
|
{

"text": "score = \u03a3_{i=1}^{|P|} P_i * (1/i)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Reciprocal Rank Sum (RRS)",

"sec_num": "4.5.3"

},

{

"text": "4.5.4 Noisy-Or (NO)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Noisy-Or (NO)",

"sec_num": "4.5.4"

},
|
{ |
|
"text": "Based on the ideas of (Kurland and Culpepper, 2018; Wu, 2012), we take", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reciprocal Rank Sum (RRS)", |
|
"sec_num": "4.5.3" |
|
}, |
|
{

"text": "score = 1 \u2212 \u220f_{i=1}^{|P|} (1 \u2212 P_i)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Noisy-Or (NO)",

"sec_num": "4.5.4"

},
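
{

"text": "The four aggregators reduce a system's score vector P for a span to a single score; the following compact sketch is our own illustrative code (noisy-or assumes scores normalized to [0, 1]).\n\ndef aggregate(scores, method, beta=0.5):\n    p = sorted(scores, reverse=True)  # P sorted in descending order\n    if method == \"max\":\n        return p[0]\n    if method == \"exs\":  # exponential sum: sum_i P_i * beta^(i-1)\n        return sum(s * beta ** i for i, s in enumerate(p))\n    if method == \"rrs\":  # reciprocal rank sum: sum_i P_i / i\n        return sum(s / (i + 1) for i, s in enumerate(p))\n    if method == \"no\":  # noisy-or: 1 - prod_i (1 - P_i)\n        out = 1.0\n        for s in p:\n            out *= 1.0 - s\n        return 1.0 - out\n    raise ValueError(method)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Noisy-Or (NO)",

"sec_num": "4.5.4"

},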
|
{ |
|
"text": "We examine two types of ensembling experiments: (i) ensembling the same model trained with different seeds and (ii) ensembling different models. Ensembling the same model trained on different seeds attempts to smooth the variance to produce a stronger result. On the other hand ensembling different models attempts to find models that may not be the strongest individually but harmonize well to produce strong results. Throughout this section we will use SA F 1 and LA F 1 to denote the short and long answer performance on \"dev-test\". Similarly we will use N S to indicate the number of models searched for an experiment and types SA and LA to indicate optimization for SA and LA F1 respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In Table 1 we find that there is a benefit to ensembling multiple versions of the same model trained with different seeds. Note that there is some data snooping ocuring here as the model is selected based on full dev performance (which is a superset of \"dev-test\"). RikiNet (Liu et al., 2020 ) and the 1 model performance reported above represent the top published NQ models at the time of writing this paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 291, |
|
"text": "(Liu et al., 2020", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Seed experiments", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We investigate the different search strategies in Table 2. We find that the greedy approach performs best overall, with the greedy ensemble optimized for LA performance performing the best on both short and long answer F1. Note that the numbers seen here, particularly when optimizing greedily for long answer performance are higher than those observed for ensembling the same model with multiple seeds. We hypothesize the superior generalization of greedy is due to exhaustive search \"overfitting\". For the remainder of this paper we will focus on greedy search optimized for long answer to keep the number of experiments presented to a manageable level. We investigate the impact of the IR inspired normalization strategies in Table 3 . The max experiment is as-before run without normalization to greedily optimize for long answer F1. The other experiments here are normalized with logistic regression, as our experiments showed that not normalizing decreased performance. We find that using max aggregation results in the best short answer F1 whereas using normalized noisy-or aggregation results in the best long answer F1. Based on these results, we run a final experiment using unnormal- ized max for short answers and logistic regression normalized noisy-or works for long answers. We find that this approach produces the strongest performance for both short and long answers with 59.3 SA F 1 and 71.5 LA F 1. Consequently we use unnormalized max ensembling of GAAMA answers (as GAAMA works on short answers) from 4 models in ARES. These numbers translate to a full dev performance of 59.3 short answer F1 and 71.1 long answer F1, which represents an improvement of 2.3 short answer F1 and 4.0 long answer F1 over our best single model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 729, |
|
"end": 736, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main experiments", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "When doing manual error analysis on the NQ dev set, we do observe patterns suggesting that each of the ensemble components do bring different strengths over the single best model. For example, the Wikipedia article for Salary Cap contains multiple sentences related to the query \"when did the nfl adopt a salary cap\":", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Candidate Contributions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The new Collective Bargaining Agreement (CBA) formulated in 2011 had an initial salary cap of $120 million...The cap was first introduced for the 1994 season and was initially $34.6 million. Both the cap and...", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Candidate Contributions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The later sentence contains the correct answer, 1994, since the question is asking for when the salary cap was initially adopted. One of our models A correctly makes this prediction whereas another one of our models B predicts 2011 from the earlier sentence. There are also cases where the correct answer span appears in the middle or later part of a paragraph and, though our model B predict the spans correctly, they assign a lower score (relative to its optimal threshold) than the model A. The position bias, therefore, appears to hurt the performance of the system in certain situations where location of the answer span relative to the paragraph is not a useful signal of correctness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Candidate Contributions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Finally, in our manual review we do see that the ensemble of these models performs better in the expected ways: (1) boosting scores when multiple models agree on an answer span even though no one model is extremely confident (2) reducing confidence when there is disagreement among models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Candidate Contributions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We introduce a novel concept for a MRC service, ARES, which uses an ensemble of models to respond to requests. This provides for multiple advantages over the traditional single model paradigm: improved F1, the ability to control the performance vs runtime tradeoff for each individual request, and improved explaiability of results by showing both candidate answers in addition to the final ensembled answer. We outline several ensembling approaches for question answering models investigated for use in ARES and compare their performance on the NQ challenge. Our findings show that ensembling unique models outperforms ensembling the same model trained with different seeds and provide further analysis to show how ensembling diverse models improves performance. We also show that using unnormalized max aggregation for short answers and logistic regression normalized noisy-or aggregation for long answers yields an F1 improvement of 2 to 4 points over single model performance on the NQ challenge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "ARES can use any MRC model. 2 https://demo.allennlp.org/ reading-comprehension 3 https://github.com/IBM/ flow-compiler/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://reactjs.org/ 5 https://markjs.io/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Synthetic QA corpora generation with roundtrip consistency", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Alberti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Andor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Alberti, Daniel Andor, Emily Pitler, Jacob De- vlin, and Michael Collins. 2019. Synthetic QA cor- pora generation with roundtrip consistency. CoRR, abs/1906.05416.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Cfo: A framework for building production nlp systems", |
|
"authors": [ |
|
{ |
|
"first": "Rishav", |
|
"middle": [], |
|
"last": "Chakravarti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cezar", |
|
"middle": [], |
|
"last": "Pendus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrzej", |
|
"middle": [], |
|
"last": "Sakrajda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Ferritto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vittorio", |
|
"middle": [], |
|
"last": "Castelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Murdock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/d19-3006" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rishav Chakravarti, Cezar Pendus, Andrzej Sakrajda, Anthony Ferritto, Lin Pan, Michael Glass, Vittorio Castelli, J William Murdock, Radu Florian, Salim Roukos, and et al. 2019. Cfo: A framework for building production nlp systems. Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP): System Demonstrations.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Reciprocal rank fusion outperforms condorcet and individual rank learning methods", |
|
"authors": [ |
|
{ |
|
"first": "Gordon", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Cormack", |
|
"suffix": "" |
|
}, |
|
{

"first": "Charles",

"middle": [

"L",

"A"

],

"last": "Clarke",

"suffix": ""

},

{

"first": "Stefan",

"middle": [],

"last": "Buettcher",

"suffix": ""

}
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 32Nd International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "758--759", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1571941.1572114" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gordon V. Cormack, Charles L A Clarke, and Ste- fan Buettcher. 2009. Reciprocal rank fusion outper- forms condorcet and individual rank learning meth- ods. In Proceedings of the 32Nd International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '09, pages 758-759, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "R-net: Machine reading comprehension with self-matching networks", |
|
"authors": [], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Natural Language Computing Group. 2017. R-net: Machine reading comprehension with self-matching networks.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Fusion in information retrieval: Sigir 2018 half-day tutorial", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Kurland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Culpepper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1383--1386", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3209978.3210186" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Kurland and J. Culpepper. 2018. Fusion in infor- mation retrieval: Sigir 2018 half-day tutorial. pages 1383-1386.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Natural Questions: a benchmark for question answering research. TACL", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennimaria", |
|
"middle": [], |
|
"last": "Palomaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivia", |
|
"middle": [], |
|
"last": "Redfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Alberti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danielle", |
|
"middle": [], |
|
"last": "Epstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Kelcey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Red- field, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Matthew Kelcey, Jacob Devlin, Kenton Lee, Kristina N. Toutanova, Llion Jones, Ming-Wei Chang, Andrew Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natu- ral Questions: a benchmark for question answering research. TACL.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Albert: A lite bert for self-supervised learning of language representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learn- ing of language representations.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Jiancheng Lv, and Nan Duan. 2020. Rikinet: Reading wikipedia pages for natural question answering", |
|
"authors": [ |
|
{ |
|
"first": "Dayiheng", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yeyun", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiusheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daxin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dayiheng Liu, Yeyun Gong, Jie Fu, Yu Yan, Jiusheng Chen, Daxin Jiang, Jiancheng Lv, and Nan Duan. 2020. Rikinet: Reading wikipedia pages for natural question answering.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "An end-to-end dialogue state tracking system with machine reading comprehension and wide & deep classification", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zengfeng", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawei", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiying", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyuan", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaijie", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianping", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Ma, Zengfeng Zeng, Dawei Zhu, Xuan Li, Yiy- ing Yang, Xiaoyuan Yao, Kaijie Zhou, and Jianping Shen. 2019. An end-to-end dialogue state tracking system with machine reading comprehension and wide & deep classification.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Radu Florian, and Avirup Sil. 2019. Frustratingly easy natural question answering", |
|
"authors": [ |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rishav", |
|
"middle": [], |
|
"last": "Chakravarti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Ferritto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alfio", |
|
"middle": [], |
|
"last": "Gliozzo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin Pan, Rishav Chakravarti, Anthony Ferritto, Michael Glass, Alfio Gliozzo, Salim Roukos, Radu Florian, and Avirup Sil. 2019. Frustratingly easy nat- ural question answering.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "and Mohit Iyyer. 2020. Open-retrieval conversational question answering", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liu", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cen", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minghui", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Bruce" |
|
], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Qu, Liu Yang, Cen Chen, Minghui Qiu, W. Bruce Croft, and Mohit Iyyer. 2020. Open-retrieval conver- sational question answering.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Know what you don't know: Unanswerable questions for SQuAD", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1806.03822" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable ques- tions for SQuAD. arXiv preprint arXiv:1806.03822.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "SQuAD: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/d16-1264" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Bidirectional attention flow for machine comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Min Joon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min Joon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016. Bidirectional at- tention flow for machine comprehension. CoRR, abs/1611.01603.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "grpc design and implementation. Talk by Varun Talwar", |
|
"authors": [ |
|
{ |
|
"first": "Varun", |
|
"middle": [], |
|
"last": "Talvar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Varun Talvar. 2016. grpc design and implementation. Talk by Varun Talwar, Product Manager at Google at Stanford, California [Accessed: 2019 06 20].", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Inter- national Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Data Fusion in Information Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Shengli", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shengli Wu. 2012. Data Fusion in Information Re- trieval. Springer Publishing Company, Incorpo- rated.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "End-to-end open-domain question answering with", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuqing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aileen", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingyu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luchen", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/n19-4013" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Yang, Yuqing Xie, Aileen Lin, Xingyu Li, Luchen Tan, Kun Xiong, Ming Li, and Jimmy Lin. 2019a. End-to-end open-domain question answering with. Proceedings of the 2019 Conference of the North.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "XLNet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{

"first": "Zhilin",

"middle": [],

"last": "Yang",

"suffix": ""

},

{

"first": "Zihang",

"middle": [],

"last": "Dai",

"suffix": ""

},

{

"first": "Yiming",

"middle": [],

"last": "Yang",

"suffix": ""

},

{

"first": "Jaime",

"middle": [],

"last": "Carbonell",

"suffix": ""

},

{

"first": "Ruslan",

"middle": [],

"last": "Salakhutdinov",

"suffix": ""

},

{

"first": "Quoc V.",

"middle": [],

"last": "Le",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Le. 2019b. XLNet: Generalized autoregressive pretraining for language understanding. CoRR, abs/1906.08237.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "An axiomatization of borda's rule", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1974, |
|
"venue": "Journal of Economic Theory", |
|
"volume": "9", |
|
"issue": "1", |
|
"pages": "43--52", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/0022-0531(74)90073-8" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H.P Young. 1974. An axiomatization of borda's rule. Journal of Economic Theory, 9(1):43 -52.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "ARES client interface. The correct answer 2018 is boxed and the MRC system answers are highlighted based on a heatmap." |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Comparison of Search Strategies. All exper-</td></tr><tr><td>iments run without normalization using max aggrega-</td></tr><tr><td>tion. The first row is 4 seed ensemble from Table 1.</td></tr></table>", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Comparison of IR inspired aggregation strate-</td></tr><tr><td>gies. All experiments run with a greedy search strategy</td></tr><tr><td>optimized exclusively for long answer F1 with logistic</td></tr><tr><td>regression normalization (except max which is not nor-</td></tr><tr><td>malized).</td></tr></table>", |
|
"text": "", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |