|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:28:59.133122Z" |
|
}, |
|
"title": "Interpreting Attention Models with Human Visual Attention in Machine Reading Comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Ekta", |
|
"middle": [], |
|
"last": "Sood", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Stuttgart", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Tannert", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Stuttgart", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Frassinelli", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Konstanz", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Bulling", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Stuttgart", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Thang" |
|
], |
|
"last": "Vu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Stuttgart", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While neural networks with attention mechanisms have achieved superior performance on many natural language processing tasks, it remains unclear to which extent learned attention resembles human visual attention. In this paper, we propose a new method that leverages eye-tracking data to investigate the relationship between human visual attention and neural attention in machine reading comprehension. To this end, we introduce a novel 23 participant eye tracking dataset-MQA-RC, in which participants read movie plots and answered pre-defined questions. We compare state of the art networks based on long shortterm memory (LSTM), convolutional neural models (CNN) and XLNet Transformer architectures. We find that higher similarity to human attention and performance significantly correlates to the LSTM and CNN models. However, we show this relationship does not hold true for the XLNet models-despite the fact that the XLNet performs best on this challenging task. Our results suggest that different architectures seem to learn rather different neural attention strategies and similarity of neural to human attention does not guarantee best performance.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While neural networks with attention mechanisms have achieved superior performance on many natural language processing tasks, it remains unclear to which extent learned attention resembles human visual attention. In this paper, we propose a new method that leverages eye-tracking data to investigate the relationship between human visual attention and neural attention in machine reading comprehension. To this end, we introduce a novel 23 participant eye tracking dataset-MQA-RC, in which participants read movie plots and answered pre-defined questions. We compare state of the art networks based on long shortterm memory (LSTM), convolutional neural models (CNN) and XLNet Transformer architectures. We find that higher similarity to human attention and performance significantly correlates to the LSTM and CNN models. However, we show this relationship does not hold true for the XLNet models-despite the fact that the XLNet performs best on this challenging task. Our results suggest that different architectures seem to learn rather different neural attention strategies and similarity of neural to human attention does not guarantee best performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Due to the high ambiguity of natural language, humans have to detect the most salient information in a given text and allocate a higher level of attention to specific regions to successfully process and comprehend it Poesio, 1994) . Eye tracking studies have been extensively used in various reading comprehension tasks to capture and investigate these attentive strategies (Rayner, 2009) and have, as such, helped to interpret cognitive processes and behaviors during reading.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 230, |
|
"text": "Poesio, 1994)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 388, |
|
"text": "(Rayner, 2009)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Attention mechanisms in neural networks have been inspired by human visual attention (Bahdanau et al., 2014; Hassabis et al., 2017) . Similar to humans, they allow networks to focus and allocate more weight to different parts of the input sequence (Mnih et al., 2014; Chorowski et al., 2015; Xu et al., 2015; Vaswani et al., 2017; Jain and Wallace, 2019) . As such, neural attention can be viewed as a model of visual saliency that makes predictions over the elements in the network's input -whether a region in an image or a word in a sentence (Frintrop et al., 2010) . Attention mechanisms have recently gained significant popularity and have boosted performance in natural language processing tasks and computer vision (Ma and Zhang, 2003; Sun and Fisher, 2003; Seo et al., 2016; Veli\u010dkovi\u0107 et al., 2017; Sood et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 108, |
|
"text": "(Bahdanau et al., 2014;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 131, |
|
"text": "Hassabis et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 267, |
|
"text": "(Mnih et al., 2014;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 291, |
|
"text": "Chorowski et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 308, |
|
"text": "Xu et al., 2015;", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 330, |
|
"text": "Vaswani et al., 2017;", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 354, |
|
"text": "Jain and Wallace, 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 568, |
|
"text": "(Frintrop et al., 2010)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 722, |
|
"end": 742, |
|
"text": "(Ma and Zhang, 2003;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 743, |
|
"end": 764, |
|
"text": "Sun and Fisher, 2003;", |
|
"ref_id": "BIBREF65" |
|
}, |
|
{ |
|
"start": 765, |
|
"end": 782, |
|
"text": "Seo et al., 2016;", |
|
"ref_id": "BIBREF59" |
|
}, |
|
{ |
|
"start": 783, |
|
"end": 807, |
|
"text": "Veli\u010dkovi\u0107 et al., 2017;", |
|
"ref_id": "BIBREF71" |
|
}, |
|
{ |
|
"start": 808, |
|
"end": 826, |
|
"text": "Sood et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Although attention mechanisms can significantly improve performance for different NLP tasks, performance degrades when models are exposed to inherent properties of natural language, such as semantic ambiguity, inferring information, or out of domain data (Blohm et al., 2018; Niven and Kao, 2019) . These findings encourage work towards enhancing network's generalizability, deterring reliance on the closed-world assumption (Reiter, 1981) . In machine reading comprehension (MRC), it has been proposed that the more similar systems are to human behavior, the more suitable they become for such a task (Trischler et al., 2017; Luo et al., 2019; Zheng et al., 2019) . As a result, much recent work aims to build machines which read and understand text, mimicking specific aspects of human behavior (Hermann et al., 2015; Nguyen et al., 2016; Rajpurkar et al., 2016; Laszlo orders the house band to defiantly play \"La Marseillaise\". : An exemplary scan path shows a reading pattern. The red circle corresponds to the location of the current fixation. Its size is proportional to the duration of the fixation. Blohm et al., 2018) . To that end, by employing selfattention, researchers attempt to enhance comprehension by building models which better capture deep contextual and salient information (Vaswani et al., 2017; Devlin et al., 2019; Shen et al., 2018; Zhang et al., 2019) . As neural attention allows us to \"peek\" inside neural networks, it can help us to better understand how models make predictions (see Figure 1 ). Similarly, human visual attention (which is captured by physiological data such as eye tracking), allows us to quantify the relative importance of items within the visual field when reading texts (see Figure 2) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 275, |
|
"text": "(Blohm et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 296, |
|
"text": "Niven and Kao, 2019)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 439, |
|
"text": "(Reiter, 1981)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 602, |
|
"end": 626, |
|
"text": "(Trischler et al., 2017;", |
|
"ref_id": "BIBREF67" |
|
}, |
|
{ |
|
"start": 627, |
|
"end": 644, |
|
"text": "Luo et al., 2019;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 645, |
|
"end": 664, |
|
"text": "Zheng et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 797, |
|
"end": 819, |
|
"text": "(Hermann et al., 2015;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 820, |
|
"end": 840, |
|
"text": "Nguyen et al., 2016;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 841, |
|
"end": 864, |
|
"text": "Rajpurkar et al., 2016;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 865, |
|
"end": 865, |
|
"text": "", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1108, |
|
"end": 1127, |
|
"text": "Blohm et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1296, |
|
"end": 1318, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 1319, |
|
"end": 1339, |
|
"text": "Devlin et al., 2019;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1340, |
|
"end": 1358, |
|
"text": "Shen et al., 2018;", |
|
"ref_id": "BIBREF60" |
|
}, |
|
{ |
|
"start": 1359, |
|
"end": 1378, |
|
"text": "Zhang et al., 2019)", |
|
"ref_id": "BIBREF82" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1514, |
|
"end": 1522, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1727, |
|
"end": 1736, |
|
"text": "Figure 2)", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we propose a novel method that leverages human eye tracking data to investigate the relationship between neural performance and human attention strategies. Concretely, by interpreting and comparing the relationship between neural attention distributions of three state of the art MRC models to human visual attention, our research for the first time addresses the following questions: (i) What is the correlation between a particular network behavior and the human visual attention? (ii) Is the emulation of the human attention system the reason why neural models with attention mechanisms achieve state of the art results on machine reading comprehension tasks?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To answer these questions, we first extend the MovieQA dataset (Tapaswi et al., 2016) with eye tracking data. In addition, we present a novel vi-sualization tool to qualitatively compare the differences in attentive behaviors between neural models and humans by showing their patterns over time in a split screen mode. Second, as widely suggested in the cognitive science literature, we quantify human attention in terms of the word-level gaze duration recorded in our eye tracking dataset (Rouse and Morris, 1986; Milosavljevic and Cerf, 2008; Van Hooft and Born, 2012; Lipton, 2018; Wiegreffe and Pinter, 2019) . Third, we interpret the relationship between human attention and three state of the art systems based on CNN, LSTM, and XLNet (Hochreiter and Schmidhuber, 1997; using Kullback-Leibler divergence (Kullback and Leibler, 1951) . By doing so, we are able to compare, evaluate and better understand neural attention distributions on text across these attention models. To the best of our knowledge, we are the first to propose a systematic approach for comparing neural attention to human gaze data in machine reading comprehension.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 85, |
|
"text": "(Tapaswi et al., 2016)", |
|
"ref_id": "BIBREF66" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 514, |
|
"text": "(Rouse and Morris, 1986;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 544, |
|
"text": "Milosavljevic and Cerf, 2008;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 570, |
|
"text": "Van Hooft and Born, 2012;", |
|
"ref_id": "BIBREF69" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 584, |
|
"text": "Lipton, 2018;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 612, |
|
"text": "Wiegreffe and Pinter, 2019)", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 741, |
|
"end": 775, |
|
"text": "(Hochreiter and Schmidhuber, 1997;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 810, |
|
"end": 838, |
|
"text": "(Kullback and Leibler, 1951)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main findings of our work are two-fold: First, we show that there is a statistically significant correlation between the CNNs and LSTMs model performances and similarity to human attention. Second, we show that the behavior of LSTM models is significantly more similar to humans than the XLNet ones even though the latter perform best on the MovieQA dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Eye tracking studies have been extensively used in cognitive science research to investigate human attention over time (Rayner, 1998; Wojciulik et al., 1998; Tsai et al., 2012; Eckstein et al., 2017) . Importantly, it has been demonstrated that attention and saccadic movements are strongly intertwined (Hoffman and Subramaniam, 1995; Deubel et al., 2000; Kristjansson, 2011) . Eye movement behaviors which are evoked from intricate information processing tasks, such as reading, can be used to identify visual attentional allocation Posner, 1980; Henderson, 1992) . As indicated in the Reading Model (Just and Carpenter, 1980), we assume a strong relationship between eye fixations, attention, and reading comprehension. In their eye tracking study, Just and Carpenter (1980) measured cognitive processing load using fixation duration. Specifically, they found that participants look longer or more often at items that are cognitively more complex, in order to successfully process them. Cognitive load increases when readers are \"accessing infrequent words, integrating information from important clauses and making inferences at the ends of sentences\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 133, |
|
"text": "(Rayner, 1998;", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 157, |
|
"text": "Wojciulik et al., 1998;", |
|
"ref_id": "BIBREF75" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 176, |
|
"text": "Tsai et al., 2012;", |
|
"ref_id": "BIBREF68" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 199, |
|
"text": "Eckstein et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 334, |
|
"text": "(Hoffman and Subramaniam, 1995;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 355, |
|
"text": "Deubel et al., 2000;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 375, |
|
"text": "Kristjansson, 2011)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 547, |
|
"text": "Posner, 1980;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 564, |
|
"text": "Henderson, 1992)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eye-tracking for Attention and Comprehension", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In the attention-based encoder-decoder architecture, rather than ignoring the internal encoder states, the attention mechanism takes advantage of these weights to generate a context vector, which is used by the decoder at various time steps (Bahdanau et al., 2014; Luong et al., 2015; Chorowski et al., 2015; Wang and Jiang; Yang et al., 2016; Dzendzik et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 264, |
|
"text": "(Bahdanau et al., 2014;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 284, |
|
"text": "Luong et al., 2015;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 308, |
|
"text": "Chorowski et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 324, |
|
"text": "Wang and Jiang;", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 343, |
|
"text": "Yang et al., 2016;", |
|
"ref_id": "BIBREF78" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 366, |
|
"text": "Dzendzik et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Mechanisms", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In Transformer networks, the main differences to previous attentive models are that these networks are purely based on attention where LSTM or GRU units are not used, and attention is applied via selfattention and multi-headed attention (Vaswani et al., 2017) without any order constraint. Since the introduction of pre-trained Transformer networks, we have observed, on the one hand, a rise in state of the art performance across a multitude of tasks in NLP (Devlin et al., 2019; Radford et al., 2018; . On the other hand, much effort is needed to interpret these highly complex models (e.g. in Vig and Belinkov (2019) ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 259, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 480, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 502, |
|
"text": "Radford et al., 2018;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 596, |
|
"end": 619, |
|
"text": "Vig and Belinkov (2019)", |
|
"ref_id": "BIBREF72" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Mechanisms", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We use question answering (QA) tasks to compare human and machine attention. Although such tasks have been widely explored with neural attention models, creating systems to comprehend semantically diverse text documents and answer related questions remains challenging (Qiu et al., 2019) . These models tend to fail when faced with adversarial attacks: the type of noise humans can easily resolve (Jia and Liang, 2017; Blohm et al., 2018; Yuan et al., 2019) . These studies uncovered the limitations of QA systems, indicating that models might process text in a different manner than humans: they rely on pattern matching in lieu of human-like decision making processes which are required in comprehension tasks (Just and Carpenter, 1980; Blohm et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 269, |
|
"end": 287, |
|
"text": "(Qiu et al., 2019)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 418, |
|
"text": "(Jia and Liang, 2017;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 438, |
|
"text": "Blohm et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 457, |
|
"text": "Yuan et al., 2019)", |
|
"ref_id": "BIBREF80" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 738, |
|
"text": "(Just and Carpenter, 1980;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 739, |
|
"end": 758, |
|
"text": "Blohm et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Answering and Machine Comprehension", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In the past years, researchers have started leveraging human gaze data for attentive neural modeling tasks. For example, Keller (2016, 2018) presented a neural QA network that combined both a task and attention module to predict and simulate human reading strategies. The authors proposed the trade-off hypothesis: human reading behaviors are task-specific and therefore evoke various specific strategies for each of these tasks. To validate their hypothesis, they used eye tracking data as the gold standard and compare model predictions of zero or one (fixated or not). In another work, Das et al. (2017) investigated the differences between neural and human attention over image regions in a visual question answering task. Their method focused on correlation ranking and visualizations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 140, |
|
"text": "Keller (2016, 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 589, |
|
"end": 606, |
|
"text": "Das et al. (2017)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eye Tracking and Neural Networks", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Note that comparisons of human and neural attention distributions over text have not been explored so far. When the goal is to purely improve performance, several papers proposed integrating gaze data into neural attention as an additional variable in the equation or as a regularization method (Sugano and Bulling, 2016; Barrett et al., 2018; Qiao et al., 2018; Sood et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 322, |
|
"end": 343, |
|
"text": "Barrett et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 362, |
|
"text": "Qiao et al., 2018;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 381, |
|
"text": "Sood et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eye Tracking and Neural Networks", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "In order to further understand the behavior of neural networks, research in neural interpretability has grown dramatically in the recent years (Lipton, 2018; Gilpin et al., 2018; Hooker et al., 2019) . Such methods include: introducing adversarial examples, error class analysis, modeling techniques (e.g. self-explaining networks), and post-hoc analysis of attention distributions (Lipton, 2018; Alvarez-Melis and Jaakkola, 2018; Rudin, 2019; Sen et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 157, |
|
"text": "(Lipton, 2018;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 178, |
|
"text": "Gilpin et al., 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 199, |
|
"text": "Hooker et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 443, |
|
"text": "Rudin, 2019;", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 461, |
|
"text": "Sen et al., 2020)", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Interpretability", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To shed light on the decisions taken by these networks, multiple interpretability studies have investigated their outputs and predictions (Alvarez-Melis and Jaakkola, 2018; Blohm et al., 2018; Gilpin et al., 2018) , and analyzed their behavior through loss visualization from various architectures (Ribeiro et al., 2016).", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 192, |
|
"text": "Blohm et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 213, |
|
"text": "Gilpin et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Interpretability", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Nevertheless, a real understanding of the internal processes of these black boxes is still rather limited (Gilpin et al., 2018) . Although these interpretations might explain predictions, there is still a lack of explanation regarding the mechanisms by which models work as well as limited insight regarding the relationship between machine and human visual attention (Lipton, 2018).", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 127, |
|
"text": "(Gilpin et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Interpretability", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The MovieQA dataset (Tapaswi et al., 2016) is used in all experiments conducted in this work. The dataset was comprised of a variety of available sources, however for the tasks in this work we only use the plot synopses. The plots vary between 1 to 20 paragraphs in size, and are checked by annotators to ensure they consist of movie relevant events and character relationships. There are a total of almost 15,000 human generated questions in this dataset corresponding to 408 movie plots. Of the 5 answer candidates denoted for each question, there is only one with a correct answer and the rest are deceptive incorrect answers. The data used for training all our models consists of plots with their corresponding questions: 9,848 training, 1,958 development and 3,138 test questions, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 42, |
|
"text": "(Tapaswi et al., 2016)", |
|
"ref_id": "BIBREF66" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "MovieQA Dataset", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Tracking Dataset", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reading Comprehension with Eye", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We present a novel reading comprehension eye tracking dataset 1 -MQA-RC -which allows researchers to observe changes in reading behavior in three comprehension tasks and to potentially induce processing strategies evoked by humans. This new extension provides a gold standard to compare and synchronize model versus human visual attention in comprehension tasks. To the best of our knowledge there are no available eye tracking datasets which use machine learning corpora as stimuli. Therefore, we build and use our reading comprehension gaze dataset as the gold standard. In addition, we provide coreference chains labeled by two human annotators 2 . Based on the lower fixation durations observed in the eye tracking data, we find that humans can easily resolve pronouns in the MQA-RC dataset (cf. Figure 6 ), where fixation durations are used to measure information processing load (Arnold et al., 2000; Rahman and Ng, 2012; Cinkara and Cabaroglu, 2015) . The figure also shows saliency over the proper nouns compared to their mentions in the chains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 885, |
|
"end": 906, |
|
"text": "(Arnold et al., 2000;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 907, |
|
"end": 927, |
|
"text": "Rahman and Ng, 2012;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 928, |
|
"end": 956, |
|
"text": "Cinkara and Cabaroglu, 2015)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 800, |
|
"end": 808, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reading Comprehension with Eye", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Data collection Our dataset is based on two studies: in Study 1 we randomly selected a set of 16 documents on which the majority of both LSTMs and CNNs models failed to correctly answer the questions; in Study 2 we selected a different set of 16 documents on which the majority of models succeeded in predicting the correct answers. In total, our dataset contains gaze data from 23 English native speakers who were recorded while reading 32 documents (around 200-250 words each) in three different comprehension tasks. We used a Tobii 600Hz head-mount eye-tracker. In total, each session lasted 45 minutes including the time required for calibration and 5-minutes breaks every 15 minutes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reading Comprehension with Eye", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Study 1 For each of the 16 documents we designed three experimental conditions: 1) regular QA where the participants have access to the plot, the question, and five answer candidates; 2) openended answer generation where the participants see the plot and the question but have to generate their own responses; and 3) QA by memory where the participants can first read the plot and then answer to the question (5 possible answers) without having the plot available. In condition 3, participants have to recover information from memory in order to answer the question. To guarantee a balanced design, we divided the 48 experimental items in three schemes containing each document only once: 5-5-6 items (for condition 1-2-3) in schema A, 5-6-5 in schema B, and 6-5-5 in schema C. We randomly assigned each participant to one of these schemes where the order of the conditions followed a Latin Squared Design (Bradley, 1958) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 906, |
|
"end": 921, |
|
"text": "(Bradley, 1958)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reading Comprehension with Eye", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We conducted a follow up study in which we took only the plots for which the majority of CNN and LSTM models predicted correctly. We hypothesized that such items that are, on average, easier for the models are also easier for the humans (higher correlation score). In this study, we only collected data for the regular QA task (condition 1). The experiment was performed by five new participants. Each participant saw all the 16 plots in a randomized order. 3 Table 1 shows the distribution of data, inter-annotator agreement, and accuracy observed on our MQA-RC dataset. We show across both studies that humans agree on selected answers for the given questions and are highly accurate. It is important to note that we only use data from Table 1 : Distribution in MovieQA with eye tracking. We show the two different studies and the number of documents seen in each schema iteration. For study 1, there are three schema iterations (A, B, C) and for study 2 there are no schema iterations (as this is only for answer by selection). We also show the number of participants for each schema iteration, and the corresponding inter-annotator agreement (agreement on answer selected). Lastly, we show the accuracy of the participants for correctly answering each question in the respective study and schema iteration.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 460, |
|
"end": 467, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 738, |
|
"end": 745, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Study 2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "the regular QA task (condition 1) so that we can compare attention and performance for difficult vs. easy cases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data analysis", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Visualization tool We developed a web interface tool 4 to visualize the eye tracking data (cf. Figure 5a ). This tool is simple, easy to use and can visualize any eye tracking data where text is used as the stimulus (see an example in Figure 2 ). Inputs to the tool are two files -one with eye tracking data and another with the corresponding text stimulus. The eye tracking data consists of the x and y on-screen gaze coordinates, fixation duration for each word, and word IDs (cf. Figure 5b ). Our tool then maps the coordinates to the stimulus and provides real time scanpaths visualization. In addition, our tool can compare neural and human visual attention via linear visualization (left to right) with a split screen (e.g., left side model, right side human). This functionality allows users to observe, in real time, the dynamic network and human visual attention distributions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 105, |
|
"text": "Figure 5a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 236, |
|
"end": 244, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 493, |
|
"text": "Figure 5b", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data analysis", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "4 Neural Models", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data analysis", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We re-implement both the CNN and LSTM QA ensemble models with two staged attention from Blohm et al. (2018) that provides state of the art results on the MovieQA dataset (Tapaswi et al., 2016) . This is a multiple choice QA task in which each datapoint contains the plot of a movie as well as its corresponding question and five potential answer candidates. The models are based on the compare-aggregate framework. Concretely, the models compare the plot to the respective question and aggregates this comparison into one vector representation to obtain a confidence score after applying the softmax, for each answer candidate. The best results were obtained from the majority vote of the nine best performing models. The two-staged attention is performed at the word and at sentence level, where the plot is weighted with respect to the question or a possible answer candidate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 107, |
|
"text": "Blohm et al. (2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 192, |
|
"text": "(Tapaswi et al., 2016)", |
|
"ref_id": "BIBREF66" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Two Staged Attention Models", |
|
"sec_num": "4.1" |
|
}, |
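The majority-vote ensembling described above can be sketched as follows; the per-question answer indices are illustrative, not taken from the paper.

```python
from collections import Counter

def majority_vote(predictions):
    """Answer index chosen by most models in the ensemble;
    ties are broken by Counter.most_common order."""
    return Counter(predictions).most_common(1)[0][0]

# Hypothetical answer indices (0-4) from the nine best models for one question:
print(majority_vote([2, 2, 4, 2, 1, 2, 2, 0, 2]))  # -> 2
```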
|
{ |
|
"text": "G = softmax X T P (1) H = XG (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Two Staged Attention Models", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The word level X indicates the answer candidate (5 total) or the question. Subsequently, when computing sentence level attention, the question or answer candidate are represented as such. Blohm et al. (2018) apply the dot-product computation for the attention mechanism. The two variations of this model with CNN and LSTM models provided state of the art results on the MovieQA dataset with an average of 84.5% on the validation set and an average of 85% on the test set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 207, |
|
"text": "Blohm et al. (2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Two Staged Attention Models", |
|
"sec_num": "4.1" |
|
}, |
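The word-level attention of Equations (1) and (2) can be sketched as follows, assuming P is a d x n matrix of plot token representations and X a d x m matrix of question or answer-candidate token representations; the dimensions and random inputs are illustrative assumptions, and only the softmax and weighted-sum operations come from the equations.

```python
import numpy as np

def softmax(z, axis=0):
    # numerically stable softmax along the given axis
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

# Illustrative dimensions (not from the paper): d-dim embeddings,
# m question/answer tokens, n plot tokens.
d, m, n = 128, 12, 300
rng = np.random.default_rng(0)
X = rng.normal(size=(d, m))  # question or answer-candidate representations
P = rng.normal(size=(d, n))  # plot token representations

G = softmax(X.T @ P, axis=0)  # (1): each column of G sums to 1 over X's tokens
H = X @ G                     # (2): each plot token as a weighted sum of X's columns

print(H.shape)  # (128, 300) -- one attended vector per plot token
```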
|
{ |
|
"text": "The authors performed a case study to further investigate the comprehension limitations of the models compared to human inference. In their analysis, they compared both networks against human performance in order to infer processing strategies which human possess but are not shown by the models. They investigated the most difficult cases, where the majority of both nine best models failed to correctly answer the question. This motivates why we used the difficult and easy documents for the CNN and LSTM models (Blohm et al., 2018) , as they are the only paper to date which both obtain SOTA results and offered qualitative analysis on the gap between human and model performance. When the majority of the models fail to correctly answer the question, we classify these documents as difficult cases for the two networks; vice versa for the easy documents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 514, |
|
"end": 534, |
|
"text": "(Blohm et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Two Staged Attention Models", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We used the pre-trained XLNet model and finetuned it for the QA task (Tapaswi et al., 2016; . We opted for XLNet given that it is a recent Transformer network for language understanding that outperformed BERT and other largescale pre-trained language models on a variety of NLP tasks . It was trained on large corpora with training objectives which are compatible with unsupervised learning and can be fine-tuned to new tasks and datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 91, |
|
"text": "(Tapaswi et al., 2016;", |
|
"ref_id": "BIBREF66" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XLNet Models", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "XLNet is based on an auto-regressive approach in which the model uses observations from previous time steps in order to predict the weight for the next time step. Advancing from the traditional auto-regressive approach, such as a Bidirectional LSTM, the authors also combine their network with an auto-encoding approach seen with the BERT model (Devlin et al., 2019) . By combining both approaches, XLNet introduces permutations on both sides. Moreover, the self-attention network (Vaswani et al., 2017) uses three components, queries, keys and values, all of which are calculated from their respective embeddings. The output is a weighted sum of the values, in which the values are weighted with a score calculated as the dot product of the respective queries and keys. It is important to note that the queries are related to the output and the keys are related to the given input. During fine-tuning, however, the model is essentially the Transformer-XL (Vaswani et al., 2017; . The auto-regressive language model estimates the joint probability over the input elements (in XLNet this x is language agnostic, i.e it is a subtoken).", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 366, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 503, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 956, |
|
"end": 978, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": "BIBREF70" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XLNet Models", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (X) = t P (x t |X <t )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "XLNet Models", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The input sequence is the concatenation of each x in the plot with the question and a potential answer candidate (there are five possible answer candidates and one correct answer).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XLNet Models", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "When fine-tuning on the question answering task, the model objective is multi-label classification given an input sequence. Note, the permutation language model is the component which helps XL-Net capture longer dependencies between elements in a given input sequence . In our method, we fine-tune the XLNet with 24 attention layers and 16 attention heads . The fine-tuned model makes a prediction by applying the argmax over the softmax, selecting the potential y-label, or answer candidate, with the highest confidence scores. The fine-tuned XLNet outperforms all other results on the validation set, obtaining the new highest accuracy of 91%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XLNet Models", |
|
"sec_num": "4.2" |
|
}, |
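A sketch of this decision rule, under the assumption of a hypothetical score_sequence function standing in for the fine-tuned XLNet's confidence logit for one concatenated (plot, question, candidate) sequence:

```python
import numpy as np

def predict_answer(score_sequence, plot, question, candidates):
    """Pick the answer candidate with the highest confidence.

    `score_sequence` is a hypothetical stand-in for the fine-tuned XLNet:
    it maps one concatenated (plot, question, candidate) sequence to a
    scalar confidence logit."""
    logits = np.array([score_sequence(f"{plot} {question} {c}")
                       for c in candidates])
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()          # softmax over the five candidates
    return int(np.argmax(probs))  # argmax over the softmax

# Illustrative usage with a dummy scorer (real scores come from the model):
dummy = lambda seq: float(len(seq) % 7)
print(predict_answer(dummy, "plot text", "question?", ["a", "b", "c", "d", "e"]))
```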
|
{ |
|
"text": "We obtain token level gaze counts (frequency counts) by mapping the x, y coordinates to bounding boxes set around each word of the stimuli. We convert the raw gaze counts into a probability distribution over the document by dividing each gaze count by the sum of all gaze counts. These token level frequency counts obtained in the hit testing method, reflecting gaze duration: the more often a token of the text is attended to, the more important it is for humans to answer the question (Just and Carpenter, 1980) . We extract word level attention weights and average them over documents, thereby comparing the word attention at document level. Since for humans, the task is to read the entire short document and then answer the question given the entire context, all items within the context are interconnected. Therefore, it is misleading to only analyze attention over one sentence or one part of the document. Furthermore, it is not cognitively plausible to limit comparison to attention distribution over specific sentences or only part of the documents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 487, |
|
"end": 513, |
|
"text": "(Just and Carpenter, 1980)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Gaze-Attention Extraction", |
|
"sec_num": "5.1" |
|
}, |
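A minimal sketch of this hit-testing step; the bounding boxes and fixation coordinates are illustrative stand-ins for the recorded eye tracking data.

```python
import numpy as np

def gaze_distribution(fixations, boxes):
    """Count how often each word's bounding box is hit by a fixation and
    normalize the counts into a probability distribution over the document.

    fixations: iterable of (x, y) on-screen gaze coordinates
    boxes:     list of (x0, y0, x1, y1) bounding boxes, one per token
    """
    counts = np.zeros(len(boxes))
    for x, y in fixations:
        for i, (x0, y0, x1, y1) in enumerate(boxes):
            if x0 <= x <= x1 and y0 <= y <= y1:
                counts[i] += 1
                break  # each fixation hits at most one word
    total = counts.sum()
    return counts / total if total > 0 else counts

# Illustrative example: three word boxes, four fixations.
boxes = [(0, 0, 50, 20), (55, 0, 120, 20), (125, 0, 180, 20)]
fixations = [(10, 10), (60, 5), (70, 12), (130, 8)]
print(gaze_distribution(fixations, boxes))  # [0.25 0.5 0.25]
```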
|
{ |
|
"text": "The sentence level attention for the CNN and LSTM models have very low entropy, where essentially almost all of the attention is distributed to one sentence and the rest of the sentence attention weights are almost zero. This is a property of the two-staged attention, which XLNet does not have. Therefore, we leverage word level attention to compare model attention versus human visual attention. During evaluation, we extract token attention weights for each of the nine best models. We then ensemble the neural attention weights. Figure 7a and 7b in the Appendix show the word level attention distribution of CNN and LSTM models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 533, |
|
"end": 542, |
|
"text": "Figure 7a", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extracting LSTM and CNN Word Level Attention", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We extracted the attention weights from the nine best XLNet models by leveraging the output of the last attention layer. It contains token level weights for each plot-answer candidate pairing. More specifically, the output of the last attention layer is a matrix of 1024 x 1024, which contains a vector of attention weights vectors for each respective token. We did so because in Transformers, attention computations happen simultaneously, while for LSTMs and CNNs they happen last. In order to compare XLNet to the LSTM and CNN models, we therefore only take the final output of the self-attention layer. Furthermore, to make these weights comparable to human gaze attention we take the maximum value in each token vector (Htut et al., 2019) and normalize them by the sum of the weights.", |
|
"cite_spans": [ |
|
{ |
|
"start": 723, |
|
"end": 742, |
|
"text": "(Htut et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting XLNet Word Level Attention", |
|
"sec_num": "5.3" |
|
}, |
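This reduction can be sketched as follows, assuming last_layer_attention is the 1024 x 1024 matrix from the final self-attention layer with row i holding the attention weight vector of token i (the row/column orientation is our assumption); taking the per-token maximum follows Htut et al. (2019).

```python
import numpy as np

def xlnet_token_attention(last_layer_attention):
    """Reduce a (seq_len x seq_len) attention matrix from the last
    self-attention layer to one weight per token: take the maximum of
    each token's attention vector, then normalize by the sum so the
    result is comparable to the human gaze distribution."""
    per_token_max = last_layer_attention.max(axis=1)
    return per_token_max / per_token_max.sum()

# Illustrative 1024 x 1024 matrix in place of real model output:
rng = np.random.default_rng(0)
attn = rng.random((1024, 1024))
weights = xlnet_token_attention(attn)
print(weights.shape, round(float(weights.sum()), 6))  # (1024,) 1.0
```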
|
{ |
|
"text": "KL divergence In order to compare the human and neural attention distributions, we computed the Kullback-Leibler divergence (Kullback and Leibler, 1951) . Concretely in this paper, we calculate the KL divergence for average-human to average-model along the word level attention distributions. This method is used to compare two probability distributions, akin to relative entropy. The output will reflect an understanding of the differences between the two probability distributions (cf. Equation 4).", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 152, |
|
"text": "(Kullback and Leibler, 1951)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Comparison Metrics", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "D KL (H M ) = x\u2208X P (x) log H(x) M (x) .", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Attention Comparison Metrics", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "where H stands for the human attention distribution and M for the model attention distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Comparison Metrics", |
|
"sec_num": "5.4" |
|
}, |
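Equation (4) translates directly into code; the small epsilon guarding against zero-probability tokens is our addition for numerical stability, not part of the paper's formulation.

```python
import numpy as np

def kl_divergence(human, model, eps=1e-12):
    """D_KL(H || M) = sum_x H(x) * log(H(x) / M(x)) over the word-level
    attention distributions (Equation 4). `eps` guards against zeros."""
    h = np.asarray(human, dtype=float) + eps
    m = np.asarray(model, dtype=float) + eps
    h /= h.sum()
    m /= m.sum()
    return float(np.sum(h * np.log(h / m)))

# Illustrative distributions over a five-token document:
human = [0.4, 0.3, 0.1, 0.1, 0.1]
model = [0.2, 0.2, 0.2, 0.2, 0.2]
print(kl_divergence(human, model))  # ~0.191 nats
```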
|
{ |
|
"text": "Spearman's rank correlation Spearman's rank correlation coefficient is used to discover the relationship between two variables (Zar, 1972) . We use the standard Spearman's rank correlation coefficients implementation from SciKit-Learn (Kokoska and Zwillinger, 2000; Pedregosa et al., 2011) , to measure if there is a correlation between model performance and the KL divergence between models and humans attention distributions. Model performance refers to the number of models that provide correct answers in the ensemble setting. Because KL divergence reflects the differences between distribution, i.e. lower divergence means high similarity to human visual attention, a negative Spearman's rank correlation indicates that higher performance means high similarity to human visual attention. The p-value indicates the significance and the likelihood that the null hypothesis will be rejected. With p-values below 0.01, we can reject the null hypothesis and thus accept that there is a statistically significant correlation between divergence and accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 138, |
|
"text": "(Zar, 1972)", |
|
"ref_id": "BIBREF81" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 265, |
|
"text": "(Kokoska and Zwillinger, 2000;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 289, |
|
"text": "Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Comparison Metrics", |
|
"sec_num": "5.4" |
|
}, |
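A sketch of this correlation test; for brevity we use scipy.stats.spearmanr here rather than the SciKit-Learn ecosystem implementation the paper cites, and the per-document values are illustrative.

```python
from scipy.stats import spearmanr

# Illustrative per-document values: number of correct models in the
# ensemble and the KL divergence of the ensemble to human attention.
n_correct     = [9, 9, 8, 8, 7, 6, 5, 4, 3, 2]
kl_divergence = [0.11, 0.12, 0.15, 0.14, 0.18, 0.22, 0.25, 0.27, 0.31, 0.35]

rho, p_value = spearmanr(n_correct, kl_divergence)
# A significant negative rho means: the more models answer correctly,
# the closer the model attention is to human attention (lower KL).
print(f"rho = {rho:.2f}, p = {p_value:.4f}")
```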
|
{ |
|
"text": "In order to explore the relationship of model performance and similarity between model attention and human visual attention, we plot in Figures 3a and 3b the nine best LSTM and XLNet models performances for each document, sorted by the sum of divergence scores and number of correct models. Similar comparison between CNN and XLNet models can be found in the Appendix, Figure 4a and 4b. Performance, i.e. correctness, refers to the number of models within the ensemble that provided correct answer. The y-axis represents the KL divergence on the left, while the x-axis represents the documents (32 in total), and the legend indicates which models the datapoints refer to. The documents presented on the left of the figure are part of the easier ones and the divergence scales up as document difficulty increases. When models are faced with difficult questions, we observe performance drops and this seems to be at a specific KL threshold. We suppose that this behavior aligns with the observations reported in the case study from (Blohm et al., 2018) , where human annotators required several strategies to solve difficult questions. Moreover, our plots show a correlation between attentive LSTM and CNN model performance and similarity to human visual attention. Table 2 : Spearman's rank correlation coefficients between the number of models which correctly answered a given question on each document and the KL divergence between models and human visual attention. Bold numbers indicate statistically significant correlation scores, where p-value < 0.001.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1031, |
|
"end": 1051, |
|
"text": "(Blohm et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 154, |
|
"text": "Figures 3a and 3b", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 379, |
|
"text": "Figure 4a", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 1265, |
|
"end": 1272, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models vs. Humans", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "To quantify the correlation between system performance and dissimilarity between model and human visual attention, we report in Table 2 the majority vote ensemble accuracy scores for each of the nine best models, Spearman's rank correlation coefficients between the KL divergence scores and the number of models that correctly answered questions, and the corresponding p-values. As observed in Figure 3a (and Figure 4a in the Appendix), there are two statistically significant negative correla- On the x-axis we show each of the 32 documents with the corresponding KL divergence score on the left y-axis. We plot performance of LSTM (cf. Figure 3a) and XLNet (cf. Figure 3b ) models for each document with green plus signs as the number of correct models indicated on the right y-axis. In Figure 3a , the larger blue dots show the LSTM divergence score for each document, while the smaller orange dots show the divergence score of XLNet models. Vice-Versa, in Figure 3b , the larger orange dots show the XLNet score for each document, while the smaller blue dots show the divergence score of the LSTM models. The documents are ordered by ascending divergence score.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 135, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 403, |
|
"text": "Figure 3a", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 418, |
|
"text": "Figure 4a", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 638, |
|
"end": 648, |
|
"text": "Figure 3a)", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 664, |
|
"end": 673, |
|
"text": "Figure 3b", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 789, |
|
"end": 798, |
|
"text": "Figure 3a", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 960, |
|
"end": 969, |
|
"text": "Figure 3b", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models vs. Humans", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "tions from the attentive LSTM (-0.73) and CNN (-0.72) models. These correlation scores indicate that for either LSTM or CNN, as the number of models that correctly answered a question related to a document increases, the KL divergence of these model types to human visual attention decreases. We conclude that there is a correlation between task performance and similarity between neural attention when leveraging LSTM or CNN and human visual attention distributions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models vs. Humans", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "However in contrast, behavior from XLNet models show weak negative correlation of -0.16 and p = 0.381 (cf. Table 2, cf. Figure 3b ). Most XLNet models correctly answer the questions, although the KL divergence increases (cf. Figure 3b ), i.e. there is no significant correlation between performance and similarity to human visual attention. All the nine XLNet models always provide correct answers. One potential reason could be that we chose documents that are difficult to answer based on an analysis of CNN and LSTM models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 129, |
|
"text": "Figure 3b", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 234, |
|
"text": "Figure 3b", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models vs. Humans", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In Table 3 , we perform a pairwise comparison of the average KL divergence for the three neural models using a linear regression model with Tukey's alpha adjustment method (Sinclair et al., 2013) . Interestingly, there is a statistically significant difference between the KL divergence of LSTMs compared to XLNets (\u03b2 = \u22120.003, p < 0.01).", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 195, |
|
"text": "(Sinclair et al., 2013)", |
|
"ref_id": "BIBREF62" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models vs. Models", |
|
"sec_num": "6.2" |
|
}, |
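One plausible way to reproduce such a pairwise comparison with Tukey adjustment is statsmodels' pairwise_tukeyhsd; this is a sketch with illustrative per-document KL values, not the authors' exact pipeline (which fits a linear regression model).

```python
import numpy as np
from statsmodels.stats.multicomp import pairwise_tukeyhsd

rng = np.random.default_rng(0)
# Illustrative per-document KL divergences for the three model types:
kl = np.concatenate([
    rng.normal(0.010, 0.002, 32),  # LSTM  (closest to human attention)
    rng.normal(0.012, 0.002, 32),  # CNN
    rng.normal(0.013, 0.002, 32),  # XLNet
])
groups = ["LSTM"] * 32 + ["CNN"] * 32 + ["XLNet"] * 32

# Pairwise comparisons (LSTM vs. CNN, LSTM vs. XLNet, CNN vs. XLNet)
# with Tukey's adjustment of the alpha level:
print(pairwise_tukeyhsd(endog=kl, groups=groups, alpha=0.01))
```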
|
{ |
|
"text": "Even though the performance of the XLNets are better with respect to accuracy, LSTMs are significantly more similar to human visual attention.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models vs. Models", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "This observation suggests that even though aiming to interpret the black box by comparing it to human performance provides insight, we should not force all model types to emulate human visual attention while performing the same task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models vs. Models", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Our core contribution is a new method for comparing human visual attention versus neural attention distributions in machine reading comprehension. To the best of our knowledge, we are the first to do so with gaze data. Our findings show that CNNs and LSTMs have a statistically significant correlation between similarity to human visual attention distributions and system performance. Interestingly, the same is not true for XLNets. Moreover, the attention weights of the LSTMs are significantly different compared to the XLNets. Although these pre-trained Transformer networks are less similar to human visual attention, our fine-tuned model obtains the new SOTA on the MovieQA benchmark dataset with 91% accuracy on the validation set. In addition, we extend the MovieQA dataset with eye tracking data, release this as open source and present an attentive reading visualiza- (LSTM vs. CNN, LSTM vs. XLNET, and CNN vs. XLNet) . We compare the models to show if the differences in attention distributions between models is of statistical significance; the significantly different model type (LSTM) can be seen in bold, where p-value < 0.01. tion tool that supports users to gain insights when comparing human versus neural attention. In future work we plan to extend our understanding of these large-scale pre-trained language models. It would be interesting to investigate whether the observed increase in performance but lack of similarity to humans in the XLNet models is because they are pre-trained on large external corpora or whether this is due to inherent properties in architecture, when compared to other pre-trained models (such as BERT). Lastly, to further disentangle token level saliency versus cognitive load of processing, additional analyses and metrics could be considered.", |
|
"cite_spans": [ |
|
{ |
|
"start": 877, |
|
"end": 926, |
|
"text": "(LSTM vs. CNN, LSTM vs. XLNET, and CNN vs. XLNet)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "E. Sood was funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy -EXC 2075 -390740016; A. Bulling was funded by the European Research Council (ERC; grant agreement 801708); S. Tannert was supported by IBM Research AI through the IBM AI Horizons Network; N.T. Vu was funded by the Carl Zeiss Foundation. We would like to especially thank Manuel Mager for his valuable feedback and guidance. And to Pavel Denisov and Sean Papay for their helpful insights and suggestions. We would also like to thank Glorianna Jagfeld for her contributions on the dataset, and Fabian K\u00f6gel for his contributions on the visualization tool. Lastly, we would like to thank the anonymous reviewers for their useful feedback. Figure 4a as well as of CNN and the XLNet models (cf. Figure 4b) to point out the differences between models. The CNN model divergences are highlighted in the large blue dots, and LSTM and XLNet models are indicated in smaller orange dots. The correctness (in red) indicated on the right y-axis, shows the number correct CNN models per document.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 765, |
|
"end": 774, |
|
"text": "Figure 4a", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 829, |
|
"text": "Figure 4b)", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The coreference annotation We used the off-theshelf high performing coreference model (Lee et al., 2018 ) (following the implementation from https:", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 103, |
|
"text": "(Lee et al., 2018", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Coreference Resolution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(a) Interface for visualization tool and example of visualization scan path.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Coreference Resolution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(b) Eye tracking data file required for visualization tool Figure 5 : Figure 5a shows the control options (left side) that allow users to pause the visualization with the space bar, change the speed, duration variables, and length of the scan path. Figure 5a , on the right side, shows an example txt stimuli file and the simulated scan path. The red dot indicates fixation duration and expands given the duration length (what we extract as human attention weights). In Figure 5b we show an example of the gaze data txt file required for visualization tool. Figure 6 : Here we compute the relative importance of coreference chains observed in the human data, where we use fixation durations to denote saliency. We show the agreement in our MQA-RC dataset, between humans, that antecedents are more salient compared to pronoun co-reference chains.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 67, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 79, |
|
"text": "Figure 5a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 258, |
|
"text": "Figure 5a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 479, |
|
"text": "Figure 5b", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 566, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.3 Coreference Resolution", |
|
"sec_num": null |
|
}, |
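As a minimal illustration of how fixation durations can be turned into the token-level human attention weights that the captions above refer to, the following sketch aggregates per-token fixation durations across participants and normalizes them into a distribution; the function name and the toy data are hypothetical, not the authors' pipeline.

```python
from collections import defaultdict

def human_attention_weights(fixations_per_participant):
    """Aggregate fixation durations (ms) per token index across participants
    and normalize into a single attention distribution for the document."""
    totals = defaultdict(float)
    for fixations in fixations_per_participant:
        for token_idx, duration_ms in fixations:
            totals[token_idx] += duration_ms
    total = sum(totals.values())
    return {idx: d / total for idx, d in sorted(totals.items())}

# Toy example: two participants, (token index, fixation duration in ms) pairs.
p1 = [(0, 210), (2, 350), (3, 90)]
p2 = [(0, 180), (1, 60), (2, 410)]
print(human_attention_weights([p1, p2]))
# Tokens fixated longer (e.g., token 2) receive higher attention weight.
```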
|
{ |
|
"text": "ported results over the OntoNotes data, that is, the CoNLL 2012 version of it; thus the model predictions are based on the annotation schema defined in (Pradhan et al., 2012) . We then test the model on the MQA-RC dataset to obtain our coreference chains. We prepared the data with the automatically generated coreference chain predictions (antecedents and their corresponding pronouns coreference chains), into a web-based annotation tool, WebAnno3 (Eckart de Castilho et al., 2016) . At this point, two experienced annotators (one English native speaker, and the other near-native) checked and corrected the automatically generated annotations. The annotators obtained 100% agreement; we suppose this is due to the small amount of documents, short length of sentences in the documents, and the documents contain easy to resolve pronouns (as seen in Figure 6 ). We then merge the corrected annotations between annotators, and present this as our coreference annotation over the 32 documents. Figure 7 : We show the word level attention distributions for both CNN 7a and LSTM 7b. The word level attention distribution has high entropy, and thus provide a suitable option to compare to human attention.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 174, |
|
"text": "(Pradhan et al., 2012)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 483, |
|
"text": "(Eckart de Castilho et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 851, |
|
"end": 859, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 993, |
|
"end": 1001, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.3 Coreference Resolution", |
|
"sec_num": null |
|
}, |
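For completeness, a small sketch of one simple way to compute the inter-annotator agreement mentioned above, treating each annotator's output as a set of (antecedent span, pronoun span) links and scoring exact-match overlap; the data structures are hypothetical and do not reflect the WebAnno export format, and the paper does not specify its agreement metric.

```python
def chain_links(annotations):
    """Represent an annotator's coreference output as a set of
    (antecedent_span, pronoun_span) links; spans are (start, end) tuples."""
    return {(ant, pro) for ant, pro in annotations}

def agreement(ann_a, ann_b):
    """Fraction of links both annotators agree on (exact-match Jaccard)."""
    a, b = chain_links(ann_a), chain_links(ann_b)
    return len(a & b) / len(a | b) if a | b else 1.0

# Hypothetical annotations over one document.
annotator_1 = [((0, 2), (10, 11)), ((15, 16), (20, 21))]
annotator_2 = [((0, 2), (10, 11)), ((15, 16), (20, 21))]
print(agreement(annotator_1, annotator_2))  # 1.0, i.e., 100% agreement
```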
|
{ |
|
"text": "The dataset is available at https://perceptualui. org/publications/sood20_conll/ 2 See appendix material for further information on coreference annotation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to maintain the same amount of data samples for both study 1 and 2, we randomly selected a subset participants data from study 1. Instead of using the full 18 participants from study 1, we used 15 participants.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
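A minimal sketch of the balancing step described in this note, assuming a hypothetical list of participant IDs; the seed and ID format are illustrative only.

```python
import random

random.seed(42)  # illustrative seed for reproducibility
study1_participants = [f"P{i:02d}" for i in range(1, 19)]  # 18 participants
subset = random.sample(study1_participants, 15)  # match study 2's sample size
print(sorted(subset))
```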
|
{ |
|
"text": "The tool is also available at https:// perceptualui.org/publications/sood20_ conll/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Towards robust interpretability with self-explaining neural networks", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Alvarez-Melis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 32nd International Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7786--7795", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Alvarez-Melis and Tommi S Jaakkola. 2018. Towards robust interpretability with self-explaining neural networks. In Proceedings of the 32nd Interna- tional Conference on Neural Information Processing Systems, pages 7786-7795. Curran Associates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The rapid use of gender information: Evidence of the time course of pronoun resolution from eyetracking", |
|
"authors": [ |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Arnold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janet", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Eisenband", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Brown-Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John C", |
|
"middle": [], |
|
"last": "Trueswell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Cognition", |
|
"volume": "76", |
|
"issue": "1", |
|
"pages": "13--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jennifer E Arnold, Janet G Eisenband, Sarah Brown- Schmidt, and John C Trueswell. 2000. The rapid use of gender information: Evidence of the time course of pronoun resolution from eyetracking. Cognition, 76(1):B13-B26.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.0473" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Sequence classification with human attention", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Barrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joachim", |
|
"middle": [], |
|
"last": "Bingel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nora", |
|
"middle": [], |
|
"last": "Hollenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marek", |
|
"middle": [], |
|
"last": "Rei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 22nd Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "302--312", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Barrett, Joachim Bingel, Nora Hollenstein, Marek Rei, and Anders S\u00f8gaard. 2018. Sequence classification with human attention. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 302-312.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Comparing attentionbased convolutional and recurrent neural networks: Success and limitations in machine reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Blohm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Glorianna", |
|
"middle": [], |
|
"last": "Jagfeld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekta", |
|
"middle": [], |
|
"last": "Sood", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Thang" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 22nd Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "108--118", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthias Blohm, Glorianna Jagfeld, Ekta Sood, Xiang Yu, and Ngoc Thang Vu. 2018. Comparing attention- based convolutional and recurrent neural networks: Success and limitations in machine reading compre- hension. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 108-118.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Complete counterbalancing of immediate sequential effects in a latin square design", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bradley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1958, |
|
"venue": "Journal of the American Statistical Association", |
|
"volume": "53", |
|
"issue": "282", |
|
"pages": "525--528", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James V Bradley. 1958. Complete counterbalancing of immediate sequential effects in a latin square de- sign. Journal of the American Statistical Associa- tion, 53(282):525-528.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A web-based tool for the integrated annotation of semantic and syntactic structures", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Eckart De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c9va", |
|
"middle": [], |
|
"last": "M\u00fajdricza-Maydt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvana", |
|
"middle": [], |
|
"last": "Seid Muhie Yimam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Hartmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Workshop on Language Technology Resources and Tools for Digital Humanities (LT4DH)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "76--84", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Eckart de Castilho,\u00c9va M\u00fajdricza-Maydt, Seid Muhie Yimam, Silvana Hartmann, Iryna Gurevych, Anette Frank, and Chris Biemann. 2016. A web-based tool for the integrated annotation of se- mantic and syntactic structures. In Proceedings of the Workshop on Language Technology Resources and Tools for Digital Humanities (LT4DH), pages 76-84, Osaka, Japan. The COLING 2016 Organiz- ing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Attention-based models for speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Jan K Chorowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitriy", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Serdyuk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "577--585", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan K Chorowski, Dzmitry Bahdanau, Dmitriy Serdyuk, Kyunghyun Cho, and Yoshua Bengio. 2015. Attention-based models for speech recogni- tion. In Advances in neural information processing systems, pages 577-585.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Parallel functioning hypothesis to explain pronoun resolution and processing load: Evidence from eye-tracking", |
|
"authors": [ |
|
{ |
|
"first": "Emrah", |
|
"middle": [], |
|
"last": "Cinkara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ne\u015fe", |
|
"middle": [], |
|
"last": "Cabaroglu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of Quantitative Linguistics", |
|
"volume": "22", |
|
"issue": "2", |
|
"pages": "119--134", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1080/09296174.2014.1001635" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emrah Cinkara and Ne\u015fe Cabaroglu. 2015. Par- allel functioning hypothesis to explain pronoun resolution and processing load: Evidence from eye-tracking. Journal of Quantitative Linguistics, 22(2):119-134.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Transformer-xl: Attentive language models beyond a fixed-length context", |
|
"authors": [ |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Jaime", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2978--2988", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zihang Dai, Zhilin Yang, Yiming Yang, Jaime G Car- bonell, Quoc Le, and Ruslan Salakhutdinov. 2019. Transformer-xl: Attentive language models beyond a fixed-length context. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 2978-2988.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Human attention in visual question answering: Do humans and deep networks look at the same regions?", |
|
"authors": [ |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harsh", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Larry", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Vision and Image Understanding", |
|
"volume": "163", |
|
"issue": "", |
|
"pages": "90--100", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhishek Das, Harsh Agrawal, Larry Zitnick, Devi Parikh, and Dhruv Batra. 2017. Human attention in visual question answering: Do humans and deep net- works look at the same regions? Computer Vision and Image Understanding, 163:90-100.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Attention, information processing and eye movement control. Reading as a perceptual process", |
|
"authors": [ |
|
{ |
|
"first": "Heiner", |
|
"middle": [], |
|
"last": "Deubel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "O'regan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Radach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "355--374", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heiner Deubel, K O'Regan, Ralph Radach, et al. 2000. Attention, information processing and eye move- ment control. Reading as a perceptual process, pages 355-374.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Who framed roger rabbit? multiple choice questions answering about movie plot", |
|
"authors": [ |
|
{ |
|
"first": "Daria", |
|
"middle": [], |
|
"last": "Dzendzik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carl", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daria Dzendzik, Carl Vogel, and Qun Liu. 2017. Who framed roger rabbit? multiple choice questions an- swering about movie plot.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Beyond eye gaze: What else can eyetracking reveal about cognition and cognitive development? Developmental cognitive neuroscience", |
|
"authors": [ |
|
{ |
|
"first": "Bel\u00e9n", |
|
"middle": [], |
|
"last": "Maria K Eckstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alison T Miller", |
|
"middle": [], |
|
"last": "Guerra-Carrillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvia", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Singley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bunge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "25", |
|
"issue": "", |
|
"pages": "69--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria K Eckstein, Bel\u00e9n Guerra-Carrillo, Alison T Miller Singley, and Silvia A Bunge. 2017. Beyond eye gaze: What else can eyetracking reveal about cognition and cognitive development? Developmen- tal cognitive neuroscience, 25:69-91.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Computational visual attention systems and their cognitive foundations: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Simone", |
|
"middle": [], |
|
"last": "Frintrop", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erich", |
|
"middle": [], |
|
"last": "Rome", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henrik", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Christensen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ACM Transactions on Applied Perception (TAP)", |
|
"volume": "7", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simone Frintrop, Erich Rome, and Henrik I Chris- tensen. 2010. Computational visual attention sys- tems and their cognitive foundations: A survey. ACM Transactions on Applied Perception (TAP), 7(1):6.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Explaining explanations: An overview of interpretability of machine learning", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Leilani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Gilpin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayesha", |
|
"middle": [], |
|
"last": "Ben Z Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bajwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lalana", |
|
"middle": [], |
|
"last": "Specter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kagal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE 5th International Conference on data science and advanced analytics (DSAA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "80--89", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leilani H Gilpin, David Bau, Ben Z Yuan, Ayesha Ba- jwa, Michael Specter, and Lalana Kagal. 2018. Ex- plaining explanations: An overview of interpretabil- ity of machine learning. In 2018 IEEE 5th Interna- tional Conference on data science and advanced an- alytics (DSAA), pages 80-89. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Modeling human reading with neural attention", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Hahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Keller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--95", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Hahn and Frank Keller. 2016. Modeling hu- man reading with neural attention. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 85-95, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Modeling task effects in human reading with neural attention", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Hahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Keller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.00054" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Hahn and Frank Keller. 2018. Modeling task effects in human reading with neural attention. arXiv preprint arXiv:1808.00054.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Visual attention and eye movement control during reading and picture viewing", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Eye movements and visual cognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "260--283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John M Henderson. 1992. Visual attention and eye movement control during reading and picture view- ing. In Eye movements and visual cognition, pages 260-283. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Teaching machines to read and comprehend", |
|
"authors": [ |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Moritz Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Kocisky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lasse", |
|
"middle": [], |
|
"last": "Espeholt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Kay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mustafa", |
|
"middle": [], |
|
"last": "Suleyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1693--1701", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefen- stette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In Advances in neural information processing systems, pages 1693-1701.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Long Short-Term Memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long Short-Term Memory. Neural Computation, 9(8).", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "The role of visual attention in saccadic eye movements", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baskaran", |
|
"middle": [], |
|
"last": "Hoffman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Subramaniam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Perception & psychophysics", |
|
"volume": "57", |
|
"issue": "6", |
|
"pages": "787--795", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James E Hoffman and Baskaran Subramaniam. 1995. The role of visual attention in saccadic eye move- ments. Perception & psychophysics, 57(6):787-795.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A benchmark for interpretability methods in deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Hooker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pieter-Jan", |
|
"middle": [], |
|
"last": "Kindermans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9737--9748", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sara Hooker, Dumitru Erhan, Pieter-Jan Kindermans, and Been Kim. 2019. A benchmark for interpretabil- ity methods in deep neural networks. In Advances in Neural Information Processing Systems, pages 9737-9748.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Do attention heads in bert track syntactic dependencies? arXiv preprint", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Phu Mon Htut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikha", |
|
"middle": [], |
|
"last": "Phang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bordia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.12246" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phu Mon Htut, Jason Phang, Shikha Bordia, and Samuel R Bowman. 2019. Do attention heads in bert track syntactic dependencies? arXiv preprint arXiv:1911.12246.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not explanation.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Adversarial examples for evaluating reading comprehension systems", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2021--2031", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia and Percy Liang. 2017. Adversarial exam- ples for evaluating reading comprehension systems. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 2021-2031.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A theory of reading: From eye fixations to comprehension", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Marcel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patricia", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Just", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Carpenter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "Psychological review", |
|
"volume": "87", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcel A Just and Patricia A Carpenter. 1980. A the- ory of reading: From eye fixations to comprehension. Psychological review, 87(4):329.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "CRC standard probability and statistics tables and formulae", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Kokoska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Zwillinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Kokoska and Daniel Zwillinger. 2000. CRC standard probability and statistics tables and formu- lae. Crc Press.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "The intriguing interactive relationship between visual attention and saccadic eye movements. The Oxford handbook of eye movements", |
|
"authors": [ |
|
{ |
|
"first": "Ami", |
|
"middle": [], |
|
"last": "Kristjansson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "455--470", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ami Kristjansson. 2011. The intriguing interactive re- lationship between visual attention and saccadic eye movements. The Oxford handbook of eye move- ments, pages 455-470.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "On information and sufficiency. The annals of mathematical statistics", |
|
"authors": [ |
|
{ |
|
"first": "Solomon", |
|
"middle": [], |
|
"last": "Kullback", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Richard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Leibler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1951, |
|
"venue": "", |
|
"volume": "22", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Solomon Kullback and Richard A Leibler. 1951. On information and sufficiency. The annals of mathe- matical statistics, 22(1):79-86.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Higher-order coreference resolution with coarse-tofine inference", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.05392" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Luheng He, and Luke Zettlemoyer. 2018. Higher-order coreference resolution with coarse-to- fine inference. arXiv preprint arXiv:1804.05392.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "The mythos of model interpretability. Queue", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zachary C Lipton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "31--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zachary C Lipton. 2018. The mythos of model inter- pretability. Queue, 16(3):31-57.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Reading like HER: Human reading inspired extractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Ling", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feiyang", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3031--3041", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1300" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ling Luo, Xiang Ao, Yan Song, Feiyang Pan, Min Yang, and Qing He. 2019. Reading like HER: Hu- man reading inspired extractive summarization. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3031- 3041.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Effective approaches to attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, EMNLP 2015, Lisbon, Portu- gal, September 17-21, 2015, pages 1412-1421.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Contrastbased image attention analysis by using fuzzy growing", |
|
"authors": [ |
|
{ |
|
"first": "Yu-Fei", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong-Jiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the eleventh ACM international conference on Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "374--381", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu-Fei Ma and Hong-Jiang Zhang. 2003. Contrast- based image attention analysis by using fuzzy grow- ing. In Proceedings of the eleventh ACM interna- tional conference on Multimedia, pages 374-381. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "First attention then intention: Insights from computational neuroscience of vision", |
|
"authors": [ |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Milosavljevic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Moran", |
|
"middle": [], |
|
"last": "Cerf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "International Journal of advertising", |
|
"volume": "27", |
|
"issue": "3", |
|
"pages": "381--398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milica Milosavljevic and Moran Cerf. 2008. First at- tention then intention: Insights from computational neuroscience of vision. International Journal of ad- vertising, 27(3):381-398.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Recurrent models of visual attention", |
|
"authors": [ |
|
{ |
|
"first": "Volodymyr", |
|
"middle": [], |
|
"last": "Mnih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Heess", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2204--2212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Volodymyr Mnih, Nicolas Heess, Alex Graves, et al. 2014. Recurrent models of visual attention. In Advances in neural information processing systems, pages 2204-2212.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Ms marco: A human-generated machine reading comprehension dataset", |
|
"authors": [ |
|
{ |
|
"first": "Tri", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mir", |
|
"middle": [], |
|
"last": "Rosenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xia", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saurabh", |
|
"middle": [], |
|
"last": "Tiwary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rangan", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tri Nguyen, Mir Rosenberg, Xia Song, Jianfeng Gao, Saurabh Tiwary, Rangan Majumder, and Li Deng. 2016. Ms marco: A human-generated machine read- ing comprehension dataset.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Probing neural network comprehension of natural language arguments", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Niven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yu", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4658--4664", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Niven and Hung-Yu Kao. 2019. Probing neu- ral network comprehension of natural language ar- guments. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 4658-4664.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Semantic ambiguity and perceived ambiguity", |
|
"authors": [ |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Semantic Ambiguity and Underspecification", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "159--201", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Massimo Poesio. 1994. Semantic ambiguity and per- ceived ambiguity. In Semantic Ambiguity and Un- derspecification, pages 159-201. CSLI Publications.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Orienting of attention. Quarterly journal of experimental psychology", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Michael I Posner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "3--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael I Posner. 1980. Orienting of attention. Quar- terly journal of experimental psychology, 32(1):3- 25.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Attention and the detection of signals", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Michael I Posner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Snyder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "Journal of experimental psychology: General", |
|
"volume": "109", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael I Posner, Charles R Snyder, and Brian J Davidson. 1980. Attention and the detection of sig- nals. Journal of experimental psychology: General, 109(2):160.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Conll-2012 shared task: Modeling multilingual unrestricted coreference in ontonotes", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sameer Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Moschitti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuchen", |
|
"middle": [], |
|
"last": "Uryupina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Joint Conference on EMNLP and CoNLL-Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. Conll- 2012 shared task: Modeling multilingual unre- stricted coreference in ontonotes. In Joint Confer- ence on EMNLP and CoNLL-Shared Task, pages 1- 40.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Exploring human-like attention supervision in visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Tingting", |
|
"middle": [], |
|
"last": "Qiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duanqing", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tingting Qiao, Jianfeng Dong, and Duanqing Xu. 2018. Exploring human-like attention supervision in visual question answering. In Thirty-Second AAAI Confer- ence on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "A survey on neural machine reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Boyu", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jungang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingfei", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.03824" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Boyu Qiu, Xu Chen, Jungang Xu, and Yingfei Sun. 2019. A survey on neural machine reading compre- hension. arXiv preprint arXiv:1906.03824.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Improving language understanding by generative pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language under- standing by generative pre-training.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Resolving complex cases of definite pronouns: the winograd schema challenge", |
|
"authors": [ |
|
{ |
|
"first": "Altaf", |
|
"middle": [], |
|
"last": "Rahman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "777--789", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Altaf Rahman and Vincent Ng. 2012. Resolving complex cases of definite pronouns: the winograd schema challenge. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning, pages 777-789. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Squad: 100, 000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100, 000+ ques- tions for machine comprehension of text. ArXiv, abs/1606.05250.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Eye movements in reading and information processing: 20 years of research", |
|
"authors": [ |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Rayner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Psychological bulletin", |
|
"volume": "124", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keith Rayner. 1998. Eye movements in reading and information processing: 20 years of research. Psy- chological bulletin, 124(3):372.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Eye movements and attention in reading, scene perception, and visual search. The quarterly journal of experimental psychology", |
|
"authors": [ |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Rayner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "62", |
|
"issue": "", |
|
"pages": "1457--1506", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keith Rayner. 2009. Eye movements and attention in reading, scene perception, and visual search. The quarterly journal of experimental psychology, 62(8):1457-1506.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "On closed world data bases", |
|
"authors": [ |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1981, |
|
"venue": "Readings in artificial intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "119--140", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raymond Reiter. 1981. On closed world data bases. In Readings in artificial intelligence, pages 119-140. Elsevier.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Why should i trust you?: Explaining the predictions of any classifier", |
|
"authors": [ |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Marco Tulio Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1144", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. Why should i trust you?: Explain- ing the predictions of any classifier. In Proceed- ings of the 22nd ACM SIGKDD international con- ference on knowledge discovery and data mining, pages 1135-1144. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "On looking into the black box: Prospects and limits in the search for mental models", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "William", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nancy", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rouse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Morris", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "Psychological bulletin", |
|
"volume": "100", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William B Rouse and Nancy M Morris. 1986. On look- ing into the black box: Prospects and limits in the search for mental models. Psychological bulletin, 100(3):349.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead", |
|
"authors": [ |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Rudin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Nature Machine Intelligence", |
|
"volume": "1", |
|
"issue": "5", |
|
"pages": "206--215", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cynthia Rudin. 2019. Stop explaining black box ma- chine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1(5):206-215.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Controlled and automatic human information processing: I. detection, search, and attention. Psychological review", |
|
"authors": [ |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Richard M Shiffrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "", |
|
"volume": "84", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Walter Schneider and Richard M Shiffrin. 1977. Con- trolled and automatic human information processing: I. detection, search, and attention. Psychological re- view, 84(1):1.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "Human attention maps for text classification: Do humans and neural networks focus on the same words?", |
|
"authors": [ |
|
{ |
|
"first": "Cansu", |
|
"middle": [], |
|
"last": "Sen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hartvigsen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Biao", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangnan", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elke", |
|
"middle": [], |
|
"last": "Rundensteiner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4596--4608", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cansu Sen, Thomas Hartvigsen, Biao Yin, Xiangnan Kong, and Elke Rundensteiner. 2020. Human at- tention maps for text classification: Do humans and neural networks focus on the same words? In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4596- 4608, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "Bidirectional attention flow for machine comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Minjoon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.01603" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016. Bidirectional attention flow for machine comprehension. arXiv preprint arXiv:1611.01603.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Disan: Directional self-attention network for rnn/cnn-free language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shirui", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Shen, Tianyi Zhou, Guodong Long, Jing Jiang, Shirui Pan, and Chengqi Zhang. 2018. Disan: Di- rectional self-attention network for rnn/cnn-free lan- guage understanding. In Thirty-Second AAAI Con- ference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "Controlled and automatic human information processing: Ii. perceptual learning, automatic attending and a general theory", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Shiffrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Psychological review", |
|
"volume": "84", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard M Shiffrin and Walter Schneider. 1977. Con- trolled and automatic human information process- ing: Ii. perceptual learning, automatic attending and a general theory. Psychological review, 84(2):127.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Alpha level adjustments for multiple dependent variable analyses and their applicability-a review", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sinclair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Taylor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [ |
|
"Jane" |
|
], |
|
"last": "Hobbs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Int J Sports Sci Eng", |
|
"volume": "7", |
|
"issue": "1", |
|
"pages": "17--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J Sinclair, Paul J Taylor, and Sarah Jane Hobbs. 2013. Alpha level adjustments for multiple dependent vari- able analyses and their applicability-a review. Int J Sports Sci Eng, 7(1):17-20.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Philipp M\u00fcller, and Andreas Bulling. 2020. Improving natural language processing tasks with human gaze-guided neural attention", |
|
"authors": [ |
|
{ |
|
"first": "Ekta", |
|
"middle": [], |
|
"last": "Sood", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Tannert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Bulling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ekta Sood, Simon Tannert, Philipp M\u00fcller, and An- dreas Bulling. 2020. Improving natural language processing tasks with human gaze-guided neural at- tention. In Advances in Neural Information Process- ing Systems (NeurIPS).", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Seeing with humans: Gaze-assisted neural image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Sugano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Bulling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1608.05203" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusuke Sugano and Andreas Bulling. 2016. Seeing with humans: Gaze-assisted neural image caption- ing. arXiv preprint arXiv:1608.05203.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Object-based visual attention for computer vision. Artificial intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Yaoru", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Fisher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "146", |
|
"issue": "", |
|
"pages": "77--123", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaoru Sun and Robert Fisher. 2003. Object-based vi- sual attention for computer vision. Artificial intelli- gence, 146(1):77-123.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "Movieqa: Understanding stories in movies through question-answering", |
|
"authors": [ |
|
{ |
|
"first": "Makarand", |
|
"middle": [], |
|
"last": "Tapaswi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rainer", |
|
"middle": [], |
|
"last": "Stiefelhagen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4631--4640", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Makarand Tapaswi, Yukun Zhu, Rainer Stiefelhagen, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. 2016. Movieqa: Understanding stories in movies through question-answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4631-4640.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "Newsqa: A machine comprehension dataset", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Trischler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingdi", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Bachman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaheer", |
|
"middle": [], |
|
"last": "Suleman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Rep4NLP@ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Trischler, Tong Wang, Xingdi Yuan, Justin Har- ris, Alessandro Sordoni, Philip Bachman, and Ka- heer Suleman. 2017. Newsqa: A machine compre- hension dataset. In Rep4NLP@ACL.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "Visual attention for solving multiple-choice science problem: An eye-tracking analysis", |
|
"authors": [ |
|
{ |
|
"first": "Meng-Jung", |
|
"middle": [], |
|
"last": "Tsai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huei-Tse", |
|
"middle": [], |
|
"last": "Hou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng-Lung", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wan-Yi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fang-Ying", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Computers & Education", |
|
"volume": "58", |
|
"issue": "1", |
|
"pages": "375--385", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meng-Jung Tsai, Huei-Tse Hou, Meng-Lung Lai, Wan- Yi Liu, and Fang-Ying Yang. 2012. Visual atten- tion for solving multiple-choice science problem: An eye-tracking analysis. Computers & Education, 58(1):375-385.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Intentional response distortion on personality tests: Using eye-tracking to understand response processes when faking", |
|
"authors": [ |
|
{ |
|
"first": "Edwin", |
|
"middle": [ |
|
"AJ" |
|
], |
|
"last": "Van Hooft", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marise", |
|
"middle": [ |
|
"Ph" |
|
], |
|
"last": "Born", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journal of Applied Psychology", |
|
"volume": "97", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edwin AJ Van Hooft and Marise Ph Born. 2012. Inten- tional response distortion on personality tests: Using eye-tracking to understand response processes when faking. Journal of Applied Psychology, 97(2):301.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "Graph attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Petar", |
|
"middle": [], |
|
"last": "Veli\u010dkovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillem", |
|
"middle": [], |
|
"last": "Cucurull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arantxa", |
|
"middle": [], |
|
"last": "Casanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriana", |
|
"middle": [], |
|
"last": "Romero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Lio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1710.10903" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. arXiv preprint arXiv:1710.10903.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "Analyzing the structure of attention in a transformer language model", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Vig and Yonatan Belinkov. 2019. Analyzing the structure of attention in a transformer language model. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 63-76.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "A compare-aggregate model for matching text sequences", |
|
"authors": [ |
|
{ |
|
"first": "Shuohang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICLR 2017: International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuohang Wang and Jing Jiang. A compare-aggregate model for matching text sequences.(2017). In ICLR 2017: International Conference on Learning Repre- sentations, Toulon, France, April 24-26: Proceed- ings, pages 1-15.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "Attention is not not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Pinter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 11-20.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "Covert visual attention modulates facespecific activity in the human fusiform gyrus: fmri study", |
|
"authors": [ |
|
{ |
|
"first": "Ewa", |
|
"middle": [], |
|
"last": "Wojciulik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nancy", |
|
"middle": [], |
|
"last": "Kanwisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Driver", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Journal of neurophysiology", |
|
"volume": "79", |
|
"issue": "3", |
|
"pages": "1574--1578", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ewa Wojciulik, Nancy Kanwisher, and Jon Driver. 1998. Covert visual attention modulates face- specific activity in the human fusiform gyrus: fmri study. Journal of neurophysiology, 79(3):1574- 1578.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "Show, attend and tell: Neural image caption generation with visual attention", |
|
"authors": [ |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhudinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International conference on machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2048--2057", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhudinov, Rich Zemel, and Yoshua Bengio. 2015. Show, attend and tell: Neural image caption generation with visual atten- tion. In International conference on machine learn- ing, pages 2048-2057.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime G. Car- bonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In NeurIPS.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "Hierarchical Attention Networks for Document Classification", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alexander J. Smola, and Eduard H. Hovy. 2016. Hi- erarchical Attention Networks for Document Classi- fication. In HLT-NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF79": { |
|
"ref_id": "b79", |
|
"title": "Qanet: Combining local convolution with global self-attention for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Adams", |
|
"middle": [ |
|
"Wei" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Dohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.09541" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. 2018. Qanet: Combining local convolution with global self-attention for reading comprehen- sion. arXiv preprint arXiv:1804.09541.", |
|
"links": null |
|
}, |
|
"BIBREF80": { |
|
"ref_id": "b80", |
|
"title": "Adversarial examples: Attacks and defenses for deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoyong", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qile", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaolin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE transactions on neural networks and learning systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoyong Yuan, Pan He, Qile Zhu, and Xiaolin Li. 2019. Adversarial examples: Attacks and defenses for deep learning. IEEE transactions on neural net- works and learning systems.", |
|
"links": null |
|
}, |
|
"BIBREF81": { |
|
"ref_id": "b81", |
|
"title": "Significance testing of the spearman rank correlation coefficient", |
|
"authors": [ |
|
{ |
|
"first": "Jerrold", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Zar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1972, |
|
"venue": "Journal of the American Statistical Association", |
|
"volume": "67", |
|
"issue": "339", |
|
"pages": "578--580", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jerrold H Zar. 1972. Significance testing of the spear- man rank correlation coefficient. Journal of the American Statistical Association, 67(339):578-580.", |
|
"links": null |
|
}, |
|
"BIBREF82": { |
|
"ref_id": "b82", |
|
"title": "Self-attention generative adversarial networks", |
|
"authors": [ |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Goodfellow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitris", |
|
"middle": [], |
|
"last": "Metaxas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Augustus", |
|
"middle": [], |
|
"last": "Odena", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7354--7363", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Han Zhang, Ian Goodfellow, Dimitris Metaxas, and Au- gustus Odena. 2019. Self-attention generative adver- sarial networks. In International Conference on Ma- chine Learning, pages 7354-7363.", |
|
"links": null |
|
}, |
|
"BIBREF83": { |
|
"ref_id": "b83", |
|
"title": "Human behavior inspired machine reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaxin", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiqun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zixin", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoping", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "425--434", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yukun Zheng, Jiaxin Mao, Yiqun Liu, Zixin Ye, Min Zhang, and Shaoping Ma. 2019. Human behavior inspired machine reading comprehension. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 425-434. ACM.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Example attention distributions of neural models (cnn, lstm, xlnet) and humans.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Figure 2: An exemplary scan path shows a reading pattern. The red circle corresponds to the location of the current fixation. Its size is proportional to the duration of the fixation.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "XLNet versus humans -KL divergence and number of correct models per document.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"text": "Models attention vs. human visual attention.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"text": "CNN and LSTM versus humans -KL divergence and number of correct CNN models per document. CNN and XLNet versus humans -KL divergence and number of correct CNN models per document.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"text": "In thisFigure weshow the KL divergence to human attention of CNN and the LSTM models (cf.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF7": { |
|
"type_str": "figure", |
|
"text": "LSTM word level attention distribution", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"text": "Pairwise comparison of the average KL divergence for the three models. Here we show the comparison of each model against each other", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"text": "Yukun Zheng, Jiaxin Mao, Yiqun Liu, Zixin Ye, Min Zhang, and Shaoping Ma. 2019. Human behavior inspired machine reading comprehension. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 425-434. ACM.", |
|
"content": "<table><tr><td>A Appendix</td></tr><tr><td>A.1 Analysis Results -Models vs. Humans</td></tr></table>", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |