|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:08:20.351944Z" |
|
}, |
|
"title": "Evaluating Attribution Methods using White-Box LSTMs", |
|
"authors": [ |
|
{ |
|
"first": "Yiding", |
|
"middle": [], |
|
"last": "Hao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Yale University New Haven", |
|
"location": { |
|
"region": "CT", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Interpretability methods for neural networks are difficult to evaluate because we do not understand the black-box models typically used to test them. This paper proposes a framework in which interpretability methods are evaluated using manually constructed networks, which we call white-box networks, whose behavior is understood a priori. We evaluate five methods for producing attribution heatmaps by applying them to white-box LSTM classifiers for tasks based on formal languages. Although our white-box classifiers solve their tasks perfectly and transparently, we find that all five attribution methods fail to produce the expected model explanations.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Interpretability methods for neural networks are difficult to evaluate because we do not understand the black-box models typically used to test them. This paper proposes a framework in which interpretability methods are evaluated using manually constructed networks, which we call white-box networks, whose behavior is understood a priori. We evaluate five methods for producing attribution heatmaps by applying them to white-box LSTM classifiers for tasks based on formal languages. Although our white-box classifiers solve their tasks perfectly and transparently, we find that all five attribution methods fail to produce the expected model explanations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Attribution methods are a family of interpretability techniques for individual neural network predictions that attempt to measure the importance of input features for determining the model's output. Given an input, an attribution method produces a vector of attribution or relevance scores, which is typically visualized as a heatmap that highlights portions of the input that contribute to model behavior. In the context of NLP, attribution scores are usually computed at the token level, so that each score represents the importance of a token within an input sequence. These heatmaps can be used to identify keywords upon which networks base their decisions (Li et al., 2016; Sundararajan et al., 2017; Arras et al., 2017a,b; Murdoch et al., 2018, inter alia) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 661, |
|
"end": 678, |
|
"text": "(Li et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 679, |
|
"end": 705, |
|
"text": "Sundararajan et al., 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 706, |
|
"end": 728, |
|
"text": "Arras et al., 2017a,b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 729, |
|
"end": 762, |
|
"text": "Murdoch et al., 2018, inter alia)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One of the main challenges facing the evaluation of attribution methods is that it is difficult to assess the quality of a heatmap when the network in question is not understood in the first place. If a word is deemed relevant by an attribution method, we do not know whether the model actually considers that word relevant, or whether the attribu-tion method has erroneously estimated its importance. Indeed, previous studies have argued that attribution methods are sensitive to features unrelated to model behavior in some cases (e.g., Kindermans et al., 2019) , and altogether insensitive to model behavior in others (Adebayo et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 539, |
|
"end": 563, |
|
"text": "Kindermans et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 643, |
|
"text": "(Adebayo et al., 2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To tease the evaluation of attribution methods apart from the interpretation of models, this paper proposes an evaluation framework for attribution methods in NLP that uses only models that are fully understood a priori. Instead of testing attribution methods on black-box models obtained through training, we construct white-box models for testing by directly setting network parameters by hand. Our focus is on white-box LSTMs that implement intuitive strategies for solving simple classification tasks based on formal languages with deterministic solutions. We apply our framework to five attribution methods: occlusion (Zeiler and Fergus, 2014) , saliency (Simonyan et al., 2014; Li et al., 2016) , gradient \u00d7 input, (G \u00d7 I, Shrikumar et al., 2017) , integrated gradients (IG, Sundararajan et al., 2017) , and layer-wise relevance propagation (LRP, Bach et al., 2015) . In doing so, we make the following contributions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 623, |
|
"end": 648, |
|
"text": "(Zeiler and Fergus, 2014)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 683, |
|
"text": "(Simonyan et al., 2014;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 700, |
|
"text": "Li et al., 2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 752, |
|
"text": "(G \u00d7 I, Shrikumar et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 776, |
|
"end": 807, |
|
"text": "(IG, Sundararajan et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 847, |
|
"end": 871, |
|
"text": "(LRP, Bach et al., 2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We construct four white-box LSTMs that can be used to test attribution methods. We provide a complete description of our model weights in Appendix A. 1 Beyond the five methods considered here, our white-box networks can be used to test any attribution method compatible with LSTMs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Empirically, we show that all five attribution methods produce erroneous heatmaps for our white-box networks, despite the models' transparent behavior. As a preview of our re-Task: Determine whether the input contains one of the following subsequences: ab, bc, cd, or dc. Output: True, since the input aacb contains two (noncontiguous) instances of ab.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Occlusion Saliency G \u00d7 I IG LRP a a c b a a c b a a c b a a c b a a c b a a c b a a c b a a c b a a c b a a c b Table 1 : Sample heatmaps for two white-box networks: a \"counter-based\" network (top) and an \"FSA-based\" network (bottom). The features relevant to the output are the two as and the b.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 129, |
|
"text": "G \u00d7 I IG LRP a a c b a a c b a a c b a a c b a a c b a a c b a a c b a a c b a a c b a a c b Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "sults, Table 1 shows sample heatmaps computed for two models designed to identify the non-contiguous subsequence ab in the input aacb. Even though both models' outputs are determined by the presence of the two as and the b, all four methods either incorrectly highlight the c or fail to highlight at least one of the as in at least one case.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 14, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We identify two general ways in which four of the five methods do not behave as intended. Firstly, while saliency, G \u00d7 I and IG are theoretically invariant to differences in model implementation (Sundararajan et al., 2017) , in practice we find that these methods can still produce qualitatively different heatmaps for nearly identical models. Secondly, we find that LRP is susceptible to numerical issues, which cause heatmaps to be zeroed out when values are rounded to zero.", |
|
"cite_spans": [ |
|
{ |
|
"start": 197, |
|
"end": 224, |
|
"text": "(Sundararajan et al., 2017)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Several approaches have been taken in the literature for understanding how to evaluate attribution methods. On a theoretical level, axiomatic approaches propose formal desiderata that attribution methods should satisfy, such as implementation invariance (Sundararajan et al., 2017) , input translation invariance (Kindermans et al., 2019) , continuity with respect to inputs (Montavon et al., 2018; Ghorbani et al., 2019) , or the existence of relationships between attribution scores and logit or softmax scores (Sundararajan et al., 2017; Ancona et al., 2018; Montavon, 2019) . The degree to which attribution methods fulfill these criteria can be determined either mathematically or empirically.", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 281, |
|
"text": "(Sundararajan et al., 2017)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 338, |
|
"text": "(Kindermans et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 398, |
|
"text": "(Montavon et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 421, |
|
"text": "Ghorbani et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 540, |
|
"text": "(Sundararajan et al., 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 561, |
|
"text": "Ancona et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 577, |
|
"text": "Montavon, 2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Other approaches, which are more experimental in nature, attempt to directly assess the relationship between attribution scores and model behav-ior. A common test, due to Bach et al. (2015) and Samek et al. (2017) and applied to sequence modeling by Arras et al. (2017a) , involves ablating or perturbing parts of the input, from those with the highest attribution scores to those with the lowest, and counting the number of features that need to be ablated in order to change the model's prediction. Another test, proposed by Adebayo et al. (2018) , tracks how heatmaps change as layers of a network are incrementally randomized.", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 189, |
|
"text": "Bach et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 213, |
|
"text": "Samek et al. (2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 270, |
|
"text": "Arras et al. (2017a)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 527, |
|
"end": 548, |
|
"text": "Adebayo et al. (2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A third kind of approach evaluates the extent to which heatmaps identify salient input features. For example, Zhang et al. (2018) propose the pointing game task, in which the highest-relevance pixel for an image classifier input must belong to the object described by the target output class. Within this framework, ), Poerner et al. (2018 , Arras et al. (2019) , and Yang and Kim (2019) construct datasets in which input features exhibit experimentally controlled notions of importance, yielding \"ground truth\" attributions against which heatmaps can be evaluated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 129, |
|
"text": "Zhang et al. (2018)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 339, |
|
"text": "), Poerner et al. (2018", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 361, |
|
"text": "Arras et al. (2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 387, |
|
"text": "Yang and Kim (2019)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our paper incorporates elements of the groundtruth approaches, since it is straightforward to determine which input features are important for our formal language tasks. We enhance these approaches by using white-box models that are guaranteed to be sensitive to those features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Formal languages are often used to evaluate the expressive power of RNNs. Here, we focus on formal languages that have been recently used to probe LSTMs' ability to capture three kinds of dependencies: counting, long-distance, and hierarchical dependencies. We define a classification task based on each of these formal languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Formal Language Tasks", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Counter languages (Fischer, 1966; Fischer et al., 1968) are languages recognized by automata equipped with counters. Weiss et al. (2018) demonstrate using an acceptance task for the languages a n b n and a n b n c n that LSTMs naturally learn to use cell state units as counters. Merrill's (2019) asymptotic analysis shows that LSTM acceptors accept only counter languages when their weights are fully saturated. Thus, counter languages may be viewed as a characterization of the expressive power of LSTMs. We define the counting task based on a simple example of a counting language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 33, |
|
"text": "(Fischer, 1966;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 34, |
|
"end": 55, |
|
"text": "Fischer et al., 1968)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 136, |
|
"text": "Weiss et al. (2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 296, |
|
"text": "Merrill's (2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Counting Dependencies", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Task 1 (Counting Task). Given a string in x \u2208 {a, b} * , determine whether or not x has strictly more as than bs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Counting Dependencies", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Example 2. The counting task classifies aaab as True, ab as False, and bbbba as False.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Counting Dependencies", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "A counter automaton can solve the counting task by incrementing its counter whenever an a is encountered and decrementing it whenever a b is encountered. It outputs True if and only if its counter is at least 1. We expect attribution scores for all input symbols to have roughly the same magnitude, but that scores assigned to a will have the opposite sign to those assigned to b.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Counting Dependencies", |
|
"sec_num": "3.1" |
|
}, |
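
{

"text": "A minimal sketch of this counter strategy in Python (illustrative only; the helper name counting_task is ours, not from the paper):\ndef counting_task(x):\n    # Increment a counter on a, decrement on b; classify True iff the count is at least 1.\n    count = 0\n    for symbol in x:\n        count += 1 if symbol == 'a' else -1\n    return count >= 1\n\n# Matches Example 2: aaab is True, ab is False, bbbba is False.\nassert counting_task('aaab') and not counting_task('ab') and not counting_task('bbbba')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Counting Dependencies",

"sec_num": "3.1"

},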
|
{ |
|
"text": "Strictly piecewise (SP, Heinz, 2007) languages were used by Avcu et al. (2017) and Kelleher (2018, 2019a,b) to test the propensity of LSTMs to learn long-distance dependencies, compared to Elman's (1990) simple recurrent networks. SP languages are regular languages whose membership is defined by the presence or absence of certain subsequences, which may or may not be contiguous. For example, ad is a subsequence of abcde, since both letters of ad occur in abcde, in the same order. Based on these ideas, we define the SP task as follows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 36, |
|
"text": "(SP, Heinz, 2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 60, |
|
"end": 78, |
|
"text": "Avcu et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 107, |
|
"text": "Kelleher (2018, 2019a,b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Long-Distance Dependencies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Task 3 (SP Task). Given x \u2208 {a, b, c, d} * , determine whether or not x contains at least one of the following subsequences: ab, bc, cd, dc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Long-Distance Dependencies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Example 4. In the SP task, aab is classified as True, since it contains the subsequence ab. Similarly, acb is classified as True, since it contains ab non-contiguously. The string aaa is classified as False.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Long-Distance Dependencies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The choice of SP languages as a test for longdistance dependencies is motivated by the fact that symbols in a non-contiguous subsequence may occur arbitrarily far from one another. The SP task yields a variant of the pointing game task in the sense that the input string may or may not contain an \"object\" (one of the four subsequences) that the network must identify. Therefore, we expect an input symbol to receive a nonzero attribution score if and only if it comprises a subsequence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Long-Distance Dependencies", |
|
"sec_num": "3.2" |
|
}, |
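
{

"text": "As an illustration of the expected \"ground truth\" for the SP task, the following Python sketch (ours; the helper names are illustrative) gives a reference solution and the set of positions that should receive nonzero attribution, namely the symbols that comprise some occurrence of a distinguished subsequence:\nSUBSEQS = ['ab', 'bc', 'cd', 'dc']\n\ndef has_subseq(x, s):\n    # True iff s occurs in x as a possibly non-contiguous subsequence.\n    pos = 0\n    for symbol in x:\n        if symbol == s[pos]:\n            pos += 1\n            if pos == len(s):\n                return True\n    return False\n\ndef sp_task(x):\n    return any(has_subseq(x, s) for s in SUBSEQS)\n\ndef relevant_positions(x):\n    # Positions expected to receive nonzero attribution scores.\n    relevant = set()\n    for first, second in SUBSEQS:\n        firsts = [i for i, symbol in enumerate(x) if symbol == first]\n        for j, symbol in enumerate(x):\n            if symbol == second and any(i < j for i in firsts):\n                relevant.add(j)\n                relevant.update(i for i in firsts if i < j)\n    return sorted(relevant)\n\nassert sp_task('aab') and sp_task('acb') and not sp_task('aaa')\nassert relevant_positions('aacb') == [0, 1, 3]   # the two as and the b, as in Table 1",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Long-Distance Dependencies",

"sec_num": "3.2"

},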
|
{ |
|
"text": "The Dyck language is the language D generated by the following context-free grammar, where \u03b5 is the empty string.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Dependencies", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "S \u2192 SS | (S) | [S] | \u03b5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Dependencies", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "D contains all balanced strings of parentheses and square brackets. Since D is often viewed as a canonical example of a context-free language (Chomsky and Sch\u00fctzenberger, 1959) , several recent studies, including Sennhauser and Berwick (2018), Bernardy (2018), Skachkova et al. (2018) , and Yu et al. (2019) , have used D to evaluate whether LSTMs can learn hierarchical dependencies implemented by pushdown automata. Here, we consider the bracket prediction task proposed by Sennhauser and Berwick (2018).", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 176, |
|
"text": "(Chomsky and Sch\u00fctzenberger, 1959)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 284, |
|
"text": "Skachkova et al. (2018)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 307, |
|
"text": "Yu et al. (2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Dependencies", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Task 5 (Bracket Prediction Task). Given a prefix p of some string in D, identify the next valid closing bracket for p. In heatmaps for the bracket prediction task, we expect the last unclosed bracket to receive the highest-magnitude relevance score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Dependencies", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We use two approaches to construct white-box networks for our tasks. In the counter-based approach, the cell state contains a set of counters, which are incremented or decremented throughout the computation. The network's final output is based on the values of the counters. In the automaton-based approach, we use the LSTM to simulate an automaton, with the cell state containing a representation of the automaton's state. We use a counter-based network to solve the counter task and an automaton-based network to solve the bracket prediction task. We use both kinds of networks to solve the SP task. All networks perfectly solve the tasks they were designed for. This section describes our white-box networks at a high level; a detailed description is given in Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In the rest of this paper, we identify the alphabet symbols a, b, c, and d with the one-hot vectors for indices 1, 2, 3, and 4, respectively. The vectors f (t) , i (t) , and o (t) represent the forget, input, and output gates, respectively. g (t) is the value added to the cell state at each time step, and \u03c3 represents the sigmoid function. We assume that the hidden state h (t) and cell state c (t) are updated as follows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 159, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 167, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 179, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 246, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 379, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 400, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "c (t) = f (t) \u2299 c (t\u22121) + i (t) \u2299 g (t) h (t) = o (t) \u2299 tanh ( c (t) ) 4.1 Counter-Based Networks", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In the counter-based approach, each position of the cell state contains the value of a counter. To adjust the counter in position j by some value v \u2208 (\u22121, 1), we set g (t) j = v, and we saturate the gates by setting them to \u03c3(m) \u2248 1, where m \u226b 0 is a large constant. For example, our network for the counting task uses a single hidden unit, with the gates always saturated and with g (t) given by", |
|
"cite_spans": [ |
|
{ |
|
"start": 384, |
|
"end": 387, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "g (t) = tanh ( u [ 1 \u22121 ] x (t) ) ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "where u > 0 is a hyperparameter that scales the counter by a factor of v = tanh(u). 2 When x (t) = a, we have g (t) = v, so the counter is incremented by v. When x (t) = b, we compute g (t) = \u2212v, so the counter is decremented by v.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 115, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 189, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
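
{

"text": "The counting-task construction above can be rendered as a short numpy sketch (ours; the exact parameters are given in Appendix A.1, and the values of u and m below are arbitrary choices for illustration):\nimport numpy as np\n\nu, m = 1.0, 10.0                 # u sets the counter step v = tanh(u); m saturates the gates\nv = np.tanh(u)\ngate = 1.0 / (1.0 + np.exp(-m))  # sigma(m), approximately 1\n\ndef run_counting_lstm(x):\n    # x is a string over {a, b}; one-hot encoding a = [1, 0], b = [0, 1].\n    c = 0.0\n    for symbol in x:\n        onehot = np.array([1.0, 0.0]) if symbol == 'a' else np.array([0.0, 1.0])\n        g = np.tanh(u * np.array([1.0, -1.0]) @ onehot)  # +v for a, -v for b\n        c = gate * c + gate * g                          # saturated forget and input gates\n        h = gate * np.tanh(c)\n    logits = np.array([h, v / 2.0])                      # True score vs. fixed False score\n    return bool(logits.argmax() == 0)\n\nassert run_counting_lstm('aaab') and not run_counting_lstm('ab')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "White-Box Networks",

"sec_num": "4"

},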
|
{ |
|
"text": "For the SP task, we use seven counters. The first four counters record how many occurrences of each symbol have been observed at time step t. The next three counters record the number of bs, cs, and ds that form one of the four distinguished subsequences with an earlier symbol. For example, after seeing the input aaabbc, the counterbased network for the SP task satisfies", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "c (6) = v [ 3 2 1 0 2 1 0 ] \u22a4 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The first four counters represent the fact that the input has 3 as, 2 bs, 1 c, and no ds. Counter #5 is 2v because the two bs form a subsequence with the as, and counter #6 is v because the c forms a subsequence with the bs. The logit scores of our counter-based networks are computed by a linear decoder using the tanh of the counter values. For the counting task, the score of the True class is h (t) , while the score of the False class is fixed to tanh(v)/2. This means that the network outputs True if and only if the final counter value is at least v. For the SP task, the score of the True class is h", |
|
"cite_spans": [ |
|
{ |
|
"start": 399, |
|
"end": 402, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(t) 5 + h (t) 6 + h (t) 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": ", while the score of the False class is again tanh(v)/2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "White-Box Networks", |
|
"sec_num": "4" |
|
}, |
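
{

"text": "The idealized counter semantics of this network (in units of the scaling factor v) can be checked against the aaabbc example with a short sketch (ours; sp_counters is an illustrative name):\ndef sp_counters(x):\n    # Counters 1-4: counts of a, b, c, d seen so far. Counters 5-7: bs, cs, and ds that\n    # extend one of the subsequences ab, bc, cd, dc with an earlier symbol.\n    counts = {s: 0 for s in 'abcd'}\n    sub = [0, 0, 0]\n    for symbol in x:\n        if symbol == 'b' and counts['a'] > 0:\n            sub[0] += 1\n        if symbol == 'c' and (counts['b'] > 0 or counts['d'] > 0):\n            sub[1] += 1\n        if symbol == 'd' and counts['c'] > 0:\n            sub[2] += 1\n        counts[symbol] += 1\n    return [counts['a'], counts['b'], counts['c'], counts['d']] + sub\n\nassert sp_counters('aaabbc') == [3, 2, 1, 0, 2, 1, 0]   # matches c^(6) = v [3 2 1 0 2 1 0]\nassert sum(sp_counters('aaabbc')[4:]) >= 1              # classified True",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "White-Box Networks",

"sec_num": "4"

},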
|
{ |
|
"text": "We consider two types of automata-based networks: one that implements a finite-state automaton (FSA) for the SP task, and one that implements a pushdown automaton (PDA) for the bracket prediction task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Our FSA construction is similar to Korsky and Berwick's (2019) FSA construction for simple recurrent networks. Consider a deterministic FSA A with states Q and alphabet \u03a3. To simulate A using an LSTM, we use |Q| \u2022 |\u03a3| hidden units, with the following interpretation. Suppose that A transitions to state q after reading input (t) . The hidden state h (t) is a onehot representation of the pair \u27e8 q, x (t) \u27e9 , which encodes both the current state of A and the most recent input symbol. Since the FSA undergoes a state transition with each input symbol, the forget gate always clears c (t) , so that information written to the cell state does not persist beyond a single time step. The output layer simply detects whether or not the FSA is in an accepting state. Details are provided in Appendix A.3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 328, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 583, |
|
"end": 586, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
|
{ |
|
"text": "Next, we describe how to implement a PDA for the bracket prediction task. We use a stack containing all unclosed brackets observed in the input string, and make predictions based on the top item of the stack. We represent a bounded stack of size k using 2k + 1 hidden units. The first k \u2212 1 positions contain all stack items except the top item, with ( represented by the value 1, [ represented by \u22121, and empty positions represented by 0. The kth position contains the top item of the stack. The next k positions contain the height of the stack in unary notation, and the last position contains a bit indicating whether or not the stack is empty. For example, after reading the input ([(() with a stack of size 4, the stack contents ([( are represented by", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "c (5) = [ 1 \u22121 0 1 1 1 1 0 0 ] \u22a4 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The 1 in position 4 indicates that the top item of the stack is (, and the 1, \u22121, and 0 in positions 1-3 indicate that the remainder of the stack is ([. The three 1s in positions 5-8 indicate that the stack height is 3, and the 0 in position 9 indicates that the stack is not empty. When x (t) is ( or [, it is copied to c is copied to the highest empty position in c", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 293, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": ":k\u22121 , pushing the opening bracket to the stack. The empty stack bit is then set to 0, marking the stack as non-empty. When the current input symbol is a closing bracket, the highest item of positions 1 through k \u2212 1 is deleted and copied to position k, popping the top item from the stack. Because the PDA network is quite complex, we focus here on describing how the top stack item in position k is determined, and leave other details for Appendix A.4. Let \u03b1 (t) be 1 if x (t) = (, \u22121 if x (t) = [, and 0 otherwise. At each time step, g", |
|
"cite_spans": [ |
|
{ |
|
"start": 475, |
|
"end": 478, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Name Formula Saliency R (c) t,i (X) = \u2202\u0177c \u2202x (t) i x (t) i =X t,i G \u00d7 I R (c) t,i (X) = Xt,i \u2202\u0177c \u2202x (t) i x (t) i =X t,i IG R (c) t,i (X) = Xt,i \u222b 1 0 \u2202\u0177c \u2202x (t) i x (t) i =\u03b1X t,i d\u03b1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "(t) k = tanh ( m \u2022 u (t) )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": ", where m \u226b 0 and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "u (t) = 2 k \u03b1 (t) + k\u22121 \u2211 j=1 2 j\u22121 h (t\u22121) j .", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Observe that m \u2022 u (t) \u226b 0 when \u03b1 (t) = 1, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "m \u2022 u (t) \u226a 0 when \u03b1 (t) = \u22121. Thus, g", |
|
"eq_num": "(t)" |
|
} |
|
], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "k contains the stack encoding of the current input symbol if it is an opening bracket. If the current input symbol is a closing bracket, then \u03b1 (t) = 0, so the sign of u (t) is determined by the highest item of h (t\u22121)", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 173, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": ":k\u22121 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automata-Based Networks", |
|
"sec_num": "4.2" |
|
}, |
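
{

"text": "The 2k+1 stack encoding described above can be reproduced with a small sketch (ours; k = 4 and the helper names are illustrative):\ndef encode_stack(stack, k=4):\n    # Layout: body (positions 1..k-1), top item (position k), unary height (k positions), empty bit.\n    value = {'(': 1, '[': -1}\n    body = [value[s] for s in stack[:-1]] + [0] * (k - 1 - max(len(stack) - 1, 0))\n    top = [value[stack[-1]]] if stack else [0]\n    height = [1] * len(stack) + [0] * (k - len(stack))\n    empty = [1 if not stack else 0]\n    return body + top + height + empty\n\ndef simulate(prefix, k=4):\n    # Push opening brackets and pop on closing brackets, then encode the resulting stack.\n    stack = ''\n    for symbol in prefix:\n        stack = stack + symbol if symbol in '([' else stack[:-1]\n    return encode_stack(stack, k)\n\n# Matches the example in the text: after reading ([((), the stack contents ([( give c^(5).\nassert simulate('([(()') == [1, -1, 0, 1, 1, 1, 1, 0, 0]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automata-Based Networks",

"sec_num": "4.2"

},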
|
{ |
|
"text": "Let X be a matrix of input vectors, such that the input at time t is the row vector X t,: = ( x (t) ) \u22a4 . Given X, an LSTM classifier produces a vector y of logit scores. Based on X,\u0177, and possibly a baseline input X, an attribution method assigns an attribution score R (c) t,i (X) to input feature X t,i for each output class c. These feature-level scores are then aggregated to produce token-level scores:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attribution Methods", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "R (c) t (X) = \u2211 i R (c) t,i (X).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attribution Methods", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Broadly speaking, our five attribution methods are grouped into three types: one perturbation-based, three gradient-based, and one decompositionbased. The following subsections describe how each method computes R (c) t,i (X).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attribution Methods", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Perturbation-based methods are premised on the idea that if X t,i is an important input feature, then changing the value of X t,i would cause\u0177 to change. The one perturbation method we consider is occlusion. In this method, R", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Perturbation-and Gradient-Based Methods", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "t,i (X) is the change in\u0177 c observed when X t,: is replaced by 0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Perturbation-and Gradient-Based Methods", |
|
"sec_num": "5.1" |
|
}, |
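
{

"text": "A minimal sketch of occlusion (ours; model stands in for any classifier that maps an input matrix to a vector of logits):\nimport numpy as np\n\ndef occlusion_scores(model, X, c):\n    # R_t(X): drop in the class-c logit when the row X[t] is replaced by the zero vector.\n    base = model(X)[c]\n    scores = np.zeros(len(X))\n    for t in range(len(X)):\n        occluded = X.copy()\n        occluded[t] = 0.0\n        scores[t] = base - model(occluded)[c]\n    return scores\n\ntoy = lambda X: X.sum(axis=0)                  # toy stand-in: logits are column sums\nprint(occlusion_scores(toy, np.eye(3), c=0))   # [1. 0. 0.]: only token 0 affects logit 0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Perturbation-and Gradient-Based Methods",

"sec_num": "5.1"

},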
|
{ |
|
"text": "Gradient-based methods rely on the same intuition as perturbation-based methods, but use automatic differentiation to simulate infinitesimal perturbations. The definitions of our three gradientbased methods are given in Table 2 . The most basic of these is saliency, which simply measures relevance by the derivative of the logit score with respect to each input feature. G \u00d7 I attempts to improve upon saliency by using the first-order terms in a Taylor-series approximation of the model instead of the gradients on their own. IG is designed to address the issue of small gradients found in saturated units by integrating G \u00d7 I along the line connecting X to a baseline input X, here taken to be the zero matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 227, |
|
"text": "Table 2", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Perturbation-and Gradient-Based Methods", |
|
"sec_num": "5.1" |
|
}, |
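
{

"text": "The three gradient-based methods can be sketched with automatic differentiation as follows (ours; the linear model below is only a runnable stand-in for the LSTM classifier, and 50 steps is an arbitrary resolution for the IG Riemann sum):\nimport torch\n\nW = torch.tensor([[1.0, 0.0], [-1.0, 0.0]])             # rows: symbols a, b; columns: classes\n\ndef model(X):\n    return X.sum(dim=0) @ W                             # logit vector for an input matrix X\n\nX = torch.tensor([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]]) # the string aab as one-hot rows\nc = 0                                                   # attribute the first (True) logit\n\ndef input_gradient(X_in):\n    x = X_in.clone().requires_grad_(True)\n    model(x)[c].backward()\n    return x.grad\n\nsaliency = input_gradient(X)                            # d y_c / d x\ngxi = X * saliency                                      # gradient x input\nsteps = 50                                              # Riemann approximation of the IG integral\nig = X * sum(input_gradient(k / steps * X) for k in range(1, steps + 1)) / steps\ntoken_scores = ig.sum(dim=1)                            # aggregate feature scores to token scores",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Perturbation-and Gradient-Based Methods",

"sec_num": "5.1"

},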
|
{ |
|
"text": "Decomposition-based methods are methods that satisfy the relation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y c = R (c) bias + \u2211 t,i R (c) t,i (X),", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "R (c)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "bias is a relevance score assigned to the bias units of the network. The interpretation of equation 2is that the logit score\u0177 c is \"distributed\" among the input features and the bias units, so that the relevance scores form a \"decomposition\" of\u0177 c .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The one decomposition-based method we consider is LRP, which computes scores using a backpropagation algorithm that distributes scores layer by layer. The scores of the output layer are initialized to", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "r (c,output) i = {\u0177 i , i = c 0, otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For each layer l with activation z (l) , activation function f (l) , and output a (l) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 38, |
|
"text": "(l)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 63, |
|
"end": 66, |
|
"text": "(l)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 82, |
|
"end": 85, |
|
"text": "(l)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "= f (l) ( z (l) )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": ", the relevance r (c,l) of a (l) is determined by the following propagation rule:", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 23, |
|
"text": "(c,l)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 29, |
|
"end": 32, |
|
"text": "(l)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "r (c,l) i = \u2211 l \u2032 \u2211 j r (c,l \u2032 ) j W (l \u2032 \u2190l) j,i a (l) i z (l \u2032 ) j + sign ( z (l \u2032 ) j ) \u03b5 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "where l \u2032 ranges over all layers to which l has a forward connection via W (l \u2032 \u2190l) and \u03b5 > 0 is a stabilizing constant. 3 For the LSTM gate interactions, we follow Arras et al. (2017b) in treating multiplicative connections of the form a (l 1 ) \u2299a (l 2 ) as activation functions of the form", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 122, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 185, |
|
"text": "Arras et al. (2017b)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "a (l 1 ) \u2299 f (l 2 ) (\u2022),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "where a (l 1 ) is f (t) , i (t) , or o (t) . The final attribution scores are given by the values propagated to the input layer:", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 23, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 28, |
|
"end": 31, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 39, |
|
"end": 42, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "R (c) t,i (X) = r (c,input t ) i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decomposition-Based Methods", |
|
"sec_num": "5.2" |
|
}, |
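
{

"text": "For a single dense layer, the propagation rule above reduces to the familiar epsilon-LRP redistribution, sketched here in numpy (ours; lrp_linear is an illustrative name):\nimport numpy as np\n\ndef lrp_linear(a_in, W, b, r_out, eps=0.001):\n    # Redistribute the relevance r_out of the outputs of z = W a_in + b onto the inputs:\n    # r_in[i] = sum_j r_out[j] * W[j, i] * a_in[i] / (z[j] + sign(z[j]) * eps).\n    z = W @ a_in + b\n    denom = z + np.sign(z) * eps\n    return a_in * (W.T @ (r_out / denom))\n\n# Whatever relevance is not passed back to the inputs (the bias and stabilizer share) is\n# what equation (2) books under R_bias.\nr_in = lrp_linear(np.array([1.0, 2.0]), np.array([[0.5, -0.5]]), np.array([0.1]), np.array([1.0]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Decomposition-Based Methods",

"sec_num": "5.2"

},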
|
|
{ |
|
"text": "To evaluate attribution methods under our framework, we begin with a qualitative description of the heatmaps that are computed for our whitebox networks, based on the illustrative sample of heatmaps appearing in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 219, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Occlusion, G \u00d7 I, and IG are well-behaved for the counting task. As expected, these methods assign a a positive value and b a negative value when the output class for attribution is c = True. When the number of as is different from the number of bs, occlusion assigns a lower-magnitude score to the symbol with fewer instances. When c = False, all relevance scores are 0. This is because\u0177 False is fixed to a constant value supplied by a bias term, so input features cannot affect its value. Saliency and LRP both fail to produce nonzero scores, at least in some cases. Saliency scores satisfy R", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Counting Task", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "(True) t,1 (X) = \u2212R (True) t,2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Counting Task", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "(X), resulting in token-level scores of 0 for all inputs. Heatmaps #3 and #4 show that LRP assigns scores of 0 to prefixes containing equal numbers of as and bs. We will see in Subsection 7.1 that this phenomenon appears to be related to the fact that the LSTM gates are saturated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Counting Task", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We obtain radically different heatmaps for the two SP task networks, despite the fact that they produce the same classifications for all inputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP Task", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "For the counter-based network, all methods except for saliency assign positive scores for c = True to symbols constituting one of the four subsequences, and scores of zero elsewhere. The saliency heatmaps do not adhere to this pattern, and instead generally assign higher scores to tokens occurring near the end of the input. Heatmaps #7-10 show that LRP fails to assign positive scores to the first symbol of each subsequence, while the other methods generally do not. 4 The LRP behavior reflects the fact that the initial a does not increment the subsequence counters, which determine the final logit score. In contrast, the behavior of occlusion, G \u00d7 I, and IG is explained by the fact that removing either the a or the b destroys the subsequence. Note that the as in heatmap #9 receive scores of 0 from occlusion and G \u00d7 I, since removing only one of the two as does not destroy the subsequence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 470, |
|
"end": 471, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP Task", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "For the FSA-based network, saliency, G \u00d7 I, and LRP assign only the last symbol a nonzero score when the relevance output class c matches the network's predicted class. IG appears to produce erratic heatmaps, exhibiting no immediately obvious pattern. Although occlusion appears to be erratic at first glance, its behavior can be explained by the fact that changing x (t) to 0 causes h (t) to be 0, which the LSTM interprets as the initial state of the FSA; thus, R (c) t (X) \u0338 = 0 precisely when X t+1:,: is classified differently from X. In all cases, the heatmaps for the FSA-based network diverge significantly from the expected heatmaps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 368, |
|
"end": 371, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 389, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP Task", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "The heatmaps for the PDA-based network also differ strikingly from those of the other networks, in that the gradient-based methods never assign nonzero scores. This is because equation 1causes g (t) to be highly saturated, resulting in zero gradients. In the case of LRP, the matching bracket is highlighted when c \u0338 = None. When the matching bracket is not the last symbol of the input, the other unclosed brackets are also highlighted, with progressively smaller magnitudes, and with brackets of the opposite type from c receiving negative scores. This pattern reflects the mechanism of (1), in which progressively larger powers of 2 are used to determine the content copied to c (t) k . When the relevance output class is c = None, LRP assigns opening brackets a negative score, revealing the fact that those input symbols set the bit c (t) 2k+1 to indicate that the stack is not empty. Although occlusion sometimes highlights the matching bracket, it does not appear to be consistent in doing so. For example, it fails to highlight the matching bracket [ ( [ ( [ [ ( [ ( [ [ ( [ ( [ [ ( [ ( [ [ ( [ 22 ) in heatmap #21, and highlights one other bracket in heatmaps #23-24.", |
|
"cite_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 198, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 840, |
|
"end": 843, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1057, |
|
"end": 1107, |
|
"text": "[ ( [ ( [ [ ( [ ( [ [ ( [ ( [ [ ( [ ( [ [ ( [ 22 )", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bracket Prediction Task", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We now turn to focused investigations of particular phenomena that attribution methods exhibit when applied to white-box networks. Subsection 7.1 begins by discussing the effect of network saturation on the gradient-based methods and LRP. In Subsection 7.2 we apply Bach et al.'s (2015) ablation test to our attribution methods for the SP task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 286, |
|
"text": "Bach et al.'s (2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detailed Evaluations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "As mentioned in the previous section, network saturation causes gradients to be approximately 0 when using sigmoid or tanh activation functions. 100.0 98.7 12 1.000 \u22123.33 \u00d7 10 \u22126 100.0 99.8 Table 5 : The results of the LRP saturation test, including the value of m, the average value of c (t) when the counter reaches 0, the network's testing accuracy, and the percentage of examples with blank heatmaps for prefixes with equal numbers of as and bs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 292, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 197, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Saturation", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "by saturation, Table 4 shows heatmaps for the input accb generated by gradient-based methods for different instantiations of the counter-based SP network with varying degrees of saturation. Recall from Section 4 that counter values for this network are expressed in multiples of the scaling factor v. We control the saturation of the network via the parameter u = tanh \u22121 (v). For all three gradient-based methods, scores for a decrease and scores for b increase as u increases. Additionally, saliency scores for the first c decrease when u increases. When u = 8, v is almost completely saturated, causing G \u00d7 I to produce all-zero heatmaps.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 22, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Saturation", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "On the other hand, IG is still able to produce nonzero heatmaps even at u = 64. Thus, IG is much more resistant to the effects of saturation than G \u00d7 I. According to Sundararajan et al. (2017) , gradient-based methods satisfy the axiom of implementation invariance: they produce the same heatmaps for any two networks that compute the same function. This formal property is seemingly at odds with the diverse array of heatmaps appearing in Table 4 , which are produced for networks that all yield identical classifiers. In particular, the networks with u = 8, 16, and 64 yield qualitatively different heatmaps, despite the fact that the three networks are distinguished only by differences in v of less than 0.001. Because the three functions are technically not equal, implementation invariance is not violated in theory; but the fact that IG produces different heatmaps for three nearly identical networks shows that the intuition described by implementation invariance is not borne out in practice.", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 192, |
|
"text": "Sundararajan et al. (2017)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 440, |
|
"end": 447, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Saturation", |
|
"sec_num": "7.1" |
|
}, |
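
{

"text": "The vanishing gradients described here are easy to reproduce (a sketch of ours, not an experiment from the paper): the derivative of tanh(u*x) at x = 1 is u*(1 - tanh(u)^2), which rounds to zero in floating point for large u.\nimport torch\n\nfor u in [1.0, 8.0, 16.0, 64.0]:\n    x = torch.tensor(1.0, requires_grad=True)\n    torch.tanh(u * x).backward()\n    print(u, x.grad.item())   # the gradient collapses toward 0 as u grows",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Saturation",

"sec_num": "7.1"

},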
|
{ |
|
"text": "Besides the gradient-based methods, LRP is also susceptible to problems arising from saturation. Recall from heatmaps #3 and #4 of Table 3 that for the counting task network, LRP assigns scores of 0 to prefixes with equal numbers of as and bs. We hypothesize that this phenomenon is related to the fact c (t) = 0 after reading such prefixes, since the counter has been incremented and decremented in equal amounts. Accordingly, we test whether this phenomenon can be mitigated by desaturating the gates so that c (t) does not exactly reach 0. Recall that the white-box LSTM gates approximate 1 \u2248 \u03c3(m) using a constant m \u226b 0. We construct networks with varying values of m and compute LRP scores on a randomly generated testing set of 1000 strings, each of which contains at least one prefix with equal numbers of as and bs. In Table 5 we report the percentage of examples for which such prefixes receive LRP scores of 0, along with the network's accuracy on this testing set and the average value of c (t) when the counter reaches 0. Indeed, the percentage of prefixes receiving scores of 0 increases as the approximation c (t) \u2248 0 becomes more exact.", |
|
"cite_spans": [ |
|
{ |
|
"start": 513, |
|
"end": 516, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1002, |
|
"end": 1005, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 138, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 827, |
|
"end": 834, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Saturation", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "So far, we have primarily compared attribution methods via visual inspection of individual examples. To compare the five methods quantitatively, Table 6 : Mean and standard deviation results of the ablation test, normalized by string length and expressed as a percentage. \"Optimal\" is the best possible score.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 152, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Test", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "we apply the ablation test of Bach et al. (2015) to our two white-box networks for the SP task. 5 Given an input string classified as True, we iteratively remove the symbol with the highest relevance score, recomputing heatmaps at each iteration, until the string no longer contains any of the four subsequences. We apply the ablation test to 100 randomly generated input strings, and report the average percentage of each string that is ablated in Table 6 . A peculiar property of the SP task is that removing a symbol preserves the validity of input strings. This means that, unlike in NLP settings, our ablation test does not suffer from the issue that ablation produces invalid inputs. Saliency, G \u00d7 I, and LRP perform close to the random baseline on the FSA network; this is unsurprising, since these methods only assign nonzero scores to the last input symbol. While Table 3 shows some variation in the IG heatmaps, IG also performs close to the random baseline. Only occlusion performs considerably better, since it is able to identify symbols whose ablation would destroy subsequences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 48, |
|
"text": "Bach et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 97, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 456, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 873, |
|
"end": 880, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Test", |
|
"sec_num": "7.2" |
|
}, |
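
{

"text": "The ablation procedure can be sketched as follows (ours; sp_task restates the reference solution for Task 3, and scores_fn stands in for any attribution method returning one score per position):\nimport random\n\nPAIRS = [('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'c')]\n\ndef sp_task(x):\n    # True iff x contains ab, bc, cd, or dc as a possibly non-contiguous subsequence.\n    seen = set()\n    for symbol in x:\n        if any(first in seen and symbol == second for first, second in PAIRS):\n            return True\n        seen.add(symbol)\n    return False\n\ndef ablation_test(x, scores_fn):\n    # Iteratively remove the highest-relevance symbol, recomputing heatmaps each round,\n    # until no distinguished subsequence remains; report the ablated fraction (Table 6).\n    removed, n = 0, len(x)\n    while sp_task(x):\n        scores = scores_fn(x)\n        drop = max(range(len(x)), key=lambda t: scores[t])\n        x = x[:drop] + x[drop + 1:]\n        removed += 1\n    return removed / n\n\n# Random baseline: score positions uniformly at random.\nprint(ablation_test('aacb', lambda s: [random.random() for _ in s]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Ablation Test",

"sec_num": "7.2"

},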
|
{ |
|
"text": "On the counter-based SP network, IG performs remarkably close to the optimal benchmark, which represents the best possible performance on this task. Occlusion, G \u00d7 I, and LRP achieve a similar level of performance to one another, while saliency performs worse than the random baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Test", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "Of all the heatmaps considered in this paper, only those computed by G \u00d7 I and IG for the counting task fully matched our expectations. In other cases, all attribution methods fail to identify at least some of the input features that should be considered relevant, or assign relevance to input features that do not affect the model's behavior. Among the five methods, saliency achieves the worst performance: it never assigns nonzero scores for the counting and bracket prediction tasks, and it does not identify the relevant symbols for either of the two SP networks. Saliency also achieves the worst performance on the ablation test for both the counterbased and the FSA-based SP networks. Among the four white-box networks, the two automatabased networks proved to be much more challenging for the attribution methods than the counterbased networks. While the LRP heatmaps for the PDA network correctly identify the matching bracket when available, no other method produces reasonable heatmaps for the PDA network, and all five methods fail to interpret the FSA network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Taken together, our results suggest that attribution heatmaps should be viewed with skepticism. This paper has identified cases in which heatmaps fail to highlight relevant features, as well as cases in which heatmaps incorrectly highlight irrelevant features. Although most of the methods perform better for the counter-based networks than the automaton-based networks, in practical settings we do not know what kinds of computations are implemented by a trained network, making it impossible to determine whether the network under analysis is compatible with the attribution method being used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "In future work, we encourage the use of our four white-box models as qualitative benchmarks for evaluating interpretability methods. For example, the style of evaluation we have developed can be replicated for attribution methods not covered in this paper, including DeepLIFT (Shrikumar et al., 2017) and contextual decomposition (Murdoch et al., 2018) . We believe that insights gleaned from white-box analysis can help researchers choose between different attribution methods and identify areas of improvement in current techniques. This appendix provides detailed descriptions of our four white-box networks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 267, |
|
"end": 300, |
|
"text": "DeepLIFT (Shrikumar et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 352, |
|
"text": "(Murdoch et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "As described in Subsection 4.1, the network for the counting task simply sets g (t) to v = tanh(u) when x (t) = a and \u2212v when x (t) = b. All gates are fixed to 1. The output layer uses h (t) = tanh ( c (t) ) as the score for the True class and v/2 as the score for the False class.", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 83, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 205, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
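|
{ |
|
"text": "To make the construction above concrete, the following is a minimal NumPy sketch of the counting-task network; it is written for illustration rather than taken from the released code, and it assumes u = 0.5 and m = 50 as stated in the footnotes. The displays below restate the same weights symbolically.\n\nimport numpy as np\n\nu, m = 0.5, 50.0\nv = np.tanh(u)\nsigma = lambda z: 1.0 / (1.0 + np.exp(-z))\n\ndef classify(string):\n    # A single cell-state unit acts as a running count of a's minus b's.\n    c, h = 0.0, 0.0\n    for ch in string:\n        x = np.array([1.0, 0.0]) if ch == 'a' else np.array([0.0, 1.0])\n        g = np.tanh(u * np.array([1.0, -1.0]) @ x)   # +v for a, -v for b\n        i = f = o = sigma(m)                         # all gates saturate to ~1\n        c = f * c + i * g\n        h = o * np.tanh(c)\n    y = np.array([1.0, 0.0]) * h + np.array([0.0, v / 2.0])   # [True, False] scores\n    return 'True' if y[0] > y[1] else 'False'\n\nprint(classify('aaba'))   # count ends positive -> 'True'\nprint(classify('abab'))   # count ends at zero  -> 'False'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |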
|
{ |
|
"text": "g (t) = tanh ( u [ 1 \u22121 ] x (t) ) f (t) = \u03c3(m) i (t) = \u03c3(m) o (t) = \u03c3(m)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "y (t) = [ 1 0 ] h (t) + [ 0 v/2 ] A.2 SP Task Network (Counter-Based)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The seven counters for the SP task are implemented as follows. First, we compute g (t) under the assumption that one of the first four counters is always incremented, and one of the last three counters is always incremented as long as x (t) \u0338 = a.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 86, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "g (t) = tanh \uf8eb \uf8ec \uf8ec \uf8ed u \uf8ee \uf8ef \uf8ef \uf8f0 I 4 0 1 0 0 0 0 1 0 0 0 0 1 \uf8f9 \uf8fa \uf8fa \uf8fb x (t) \uf8f6 \uf8f7 \uf8f7 \uf8f8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Then, we use the input gate to condition the last three counters on the value of the first four counters. For example, if h (t\u22121) 1 = 0, then no as have been encountered in the input string before time t. In that case, the input gate for counter #5, which represents subsequences ending with b, is set to i (t) 5 = \u03c3(\u2212m) \u2248 0. This is because a b encountered at time t would not form part of a subsequence if no as have been encountered so far, so counter #5 should not be incremented.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "i (t) = \u03c3 \uf8eb \uf8ec \uf8ec \uf8ed 2m \uf8ee \uf8ef \uf8ef \uf8f0 0 0 1 0 0 0 0 1 0 1 0 0 1 0 0 \uf8f9 \uf8fa \uf8fa \uf8fb h (t\u22121) +m [ 1 1 1 1 \u22121 \u22121 \u22121 ] \u22a4 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
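|
{ |
|
"text": "As a quick numeric check of the conditioning just described (a sketch for illustration, assuming that the pre-activation of the gate for counter #5 is 2m·h^{(t−1)}_1 − m, with u = 0.7 and m = 50 from the footnotes):\n\nimport numpy as np\n\nu, m = 0.7, 50.0\nv = np.tanh(u)\nsigma = lambda z: 1.0 / (1.0 + np.exp(-z))\n\nfor h1 in (0.0, np.tanh(v)):      # no a seen yet vs. at least one a seen\n    i5 = sigma(2.0 * m * h1 - m)\n    print(h1, i5)\n# h1 = 0.0  -> i5 of roughly 2e-22: a b cannot start a subsequence, so counter #5 is frozen\n# h1 = 0.54 -> i5 of roughly 0.98: a b extends a subsequence begun by an earlier a", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |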
|
{ |
|
"text": "All other gates are fixed to 1. The output layer sets the score of the True class to h", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "(t) 5 + h (t) 6 + h", |
|
"eq_num": "(t)" |
|
} |
|
], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "7 and the score of the False class to v/2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "f (t) = \u03c3(m1) o (t) = \u03c3(m1) y (t) = [ 0 1 1 1 0 0 0 0 ] h (t) + [ 0 v/2 ] A.3 FSA Network", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here we describe a general construction of an LSTM simulating an FSA with states Q, accepting states Q F \u2286 Q, alphabet \u03a3, and transition function \u03b4 : Q \u00d7 \u03a3 \u2192 Q. Recall that h (t) contains a one-hot representation of pairs in Q \u00d7 \u03a3 encoding the current state of the FSA and the most recent input symbol. The initial state h (0) = 0 represents the starting configuration of the FSA.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 178, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "At a high level, the state transition system works as follows. First, g (t) first marks all the positions corresponding to the current input x (t) . 6", |
|
"cite_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 75, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 146, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "g (t) \u27e8q,x\u27e9 = { v, x = x (t) 0, otherwise", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The input gate then filters out any positions that do not represent valid transitions from the previous state q \u2032 , which is recovered from h (t\u22121) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 147, |
|
"text": "(t\u22121)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "i (t) \u27e8q,x\u27e9 = { 1, \u03b4(q \u2032 , x) = q 0, otherwise", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Now, we describe how this behavior is implemented in our LSTM. The cell state update is straightforwardly implemented as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "g (t) = tanh ( uW (c,x) x (t) )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ",", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (c,x)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u27e8q,x\u27e9,j = { 1, j is the index for x 0, otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Observe that the matrix W (c,x) essentially contains a copy of I 4 for each state, such that each copy is distributed across the different cell state units designated for that state. The input gate is more complex. First, the bias term handles the case where the current case is the starting state q 0 . This is necessary because the initial configuration of the network is represented by h (0) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 31, |
|
"text": "(c,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 394, |
|
"text": "(0)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "= 0. b (i) \u27e8q,x\u27e9 = { m, \u03b4(q 0 , x) = q \u2212m, otherwise", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The bias vector sets i (t) \u27e8q,x\u27e9 to be 1 if the FSA transitions from q 0 to q after reading x, and 0 otherwise. We replicate this behavior for other values 6 We use v = tanh(1) \u2248 0.762. of h (t\u22121) by using the weight matrix W (i,h) , taking the bias vector into account:", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 26, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 157, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 196, |
|
"text": "(t\u22121)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 231, |
|
"text": "(i,h)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "i (t) = \u03c3 ( W (i,h) h (t\u22121) + b (i) )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ",", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (i) \u27e8q,x\u27e9,\u27e8q \u2032 ,x \u2032 \u27e9 = { m \u2212 b (i) \u27e8q,x\u27e9 , \u03b4(q \u2032 , x) = q \u2212m \u2212 b (i) \u27e8q,x\u27e9 , otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The forget gate is fixed to \u22121, since the state needs to be updated at every time step. The output gate is fixed to 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "f (t) = \u03c3(\u2212m1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "o (t) = \u03c3(m1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The output layer simply selects hidden units that represent accepting and rejecting states:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "y (t) = W h (t) ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W c,\u27e8q,x\u27e9 = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1, c = True and q \u2208 Q F 1, c = False and q / \u2208 Q F 0, otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |
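|
{ |
|
"text": "As an illustration of this general construction, the following NumPy sketch builds the weight matrices defined above directly from a transition function and simulates the resulting LSTM. It is written for this description rather than taken from the released code; the example FSA (parity of a's) is ours, and m = 50 and v = tanh(1) follow footnote 6.\n\nimport numpy as np\n\nsigma = lambda z: 1.0 / (1.0 + np.exp(-z))\n\ndef build(states, alphabet, delta, q0, accepting, m=50.0, u=1.0):\n    pairs = [(q, x) for q in states for x in alphabet]   # one unit per (state, symbol) pair\n    P = len(pairs)\n    W_cx = np.zeros((P, len(alphabet)))   # g marks pairs whose symbol is the current input\n    for r, (q, x) in enumerate(pairs):\n        W_cx[r, alphabet.index(x)] = 1.0\n    b_i = np.array([m if delta[(q0, x)] == q else -m for (q, x) in pairs])\n    W_ih = np.zeros((P, P))               # input gate keeps only valid transitions\n    for r, (q, x) in enumerate(pairs):\n        for col, (qp, _) in enumerate(pairs):\n            W_ih[r, col] = (m if delta[(qp, x)] == q else -m) - b_i[r]\n    W_out = np.zeros((2, P))              # row 0 sums accepting units, row 1 the rest\n    for col, (q, _) in enumerate(pairs):\n        W_out[0 if q in accepting else 1, col] = 1.0\n    return W_cx, W_ih, b_i, W_out, P, m, u\n\ndef run(model, string, alphabet):\n    W_cx, W_ih, b_i, W_out, P, m, u = model\n    c, h = np.zeros(P), np.zeros(P)\n    for ch in string:\n        x = np.eye(len(alphabet))[alphabet.index(ch)]\n        g = np.tanh(u * (W_cx @ x))\n        i = sigma(W_ih @ h + b_i)\n        f = sigma(-m * np.ones(P))        # ~0: the encoded state is rebuilt at every step\n        o = sigma(m * np.ones(P))         # ~1\n        c = f * c + i * g\n        h = o * np.tanh(c)\n    y = W_out @ h\n    return 'accept' if y[0] > y[1] else 'reject'\n\nalphabet = ['a', 'b']\ndelta = {(0, 'a'): 1, (0, 'b'): 0, (1, 'a'): 0, (1, 'b'): 1}\nmodel = build([0, 1], alphabet, delta, q0=0, accepting={0})\nprint(run(model, 'abba', alphabet))   # even number of a's -> accept\nprint(run(model, 'aaba', alphabet))   # odd number of a's  -> reject", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Counting Task Network", |
|
"sec_num": null |
|
}, |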
|
{ |
|
"text": "Finally, we describe how the PDA network for the bracket prediction task is implemented. Of the four networks, this one is the most intricate. Recall from Subsection 4.2 that we implement a bounded stack of size k using 2k + 1 hidden units, with the following interpretation: \u2022 c 2k+1 is a bit, which is set to be positive if the stack is empty and nonpositive otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We represent the brackets (, [, ) , and ] in onehot encoding with the indices 1, 2, 3, and 4, respectively. The opening brackets ( and [ are represented on the stack by 1 and \u22121, respectively. T", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 33, |
|
"text": "(, [, )", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We begin by describing g (t) . Due to the complexity of the network, we describe the weights and biases individually, which are combined as follows. (g,t) ))", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 28, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 154, |
|
"text": "(g,t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "g (t) = tanh ( m ( z", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ", where z (g,t) = W (c,x) x (t) + W (c,h ) h (t\u22121) + b (c) First, the bias vector sets c (t) 2k+1 to be 1, indicating that the stack is empty. This ensures that the initial hidden state h (t) = 0 is treated as an empty stack. c,x) serves three functions when x (t) is an open bracket, and does nothing when x (t) is a closing bracket. First, it pushes x (t) to the top of the stack, represented by c k+1:2k to 1 in order to increment the unary counter for the height of the stack. Later, we will see that the input gate filters out all positions except for the top of the stack. Finally, W (c,x) sets the empty stack indicator to \u22121, indicating that the stack is not empty.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 15, |
|
"text": "(g,t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 20, |
|
"end": 25, |
|
"text": "(c,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 28, |
|
"end": 31, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 36, |
|
"end": 40, |
|
"text": "(c,h", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 55, |
|
"end": 58, |
|
"text": "(c)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 89, |
|
"end": 92, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 230, |
|
"text": "c,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 264, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 357, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 590, |
|
"end": 595, |
|
"text": "(c,x)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "b (c) = [ 0 2 ] W (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (c,x) (c,h) performs two functions. First, it completes equation (1) for c (t) k , setting it to be the secondhighest stack item from the previous time step. Second, it copies the top of the stack to the first k \u2212 1 positions, with the input gate filtering out all but the highest position.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 7, |
|
"text": "(c,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 8, |
|
"end": 13, |
|
"text": "(c,h)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 77, |
|
"end": 80, |
|
"text": "(t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "= \uf8ee \uf8ef \uf8ef \uf8f0 0 0 0 0 2 k \u22122 k 0 0 1 1 0 0 \u22122 \u22122 0 0 \uf8f9 \uf8fa \uf8fa \uf8fb W", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (c,h) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 7, |
|
"text": "(c,h)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "= \uf8ee \uf8ef \uf8ef \uf8f0 0 1 0 0 2 4 \u2022 \u2022 \u2022 2 k\u22121 0 0 0 0 0 0 0 0 0 \u22121 0 \uf8f9 \uf8fa \uf8fa \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, the \u22121s serve to decrease the empty stack indicator by an amount proportional to the stack height at time t \u2212 1. Observe that if x (t) is a closing bracket and h (t\u22121) represents a stack with only one item, then = \u22121 + 2 = 1, so the empty stack indicator is set to 1, indicating that the stack is empty. Otherwise,", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 176, |
|
"text": "(t\u22121)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (c,x)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (c,x)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2k+1,:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "x (t) + W (c,h)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2k+1,: h (t\u22121) \u2264 \u22122, so the empty stack indicator is nonpositive. Now, we describe the input gate, given by the following.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "i (t) = \u03c3 ( m (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "z (i,t) ))", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 7, |
|
"text": "(i,t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "z (i,t) = W (i,x) x (t) + W (i,h ) h (t\u22121) + b (i) W (i,x) sets the input gate for the first k \u2212 1 positions to 0 when x (t) is a closing bracket. In that case, an item needs to be popped from the stack, so nothing can be copied to these hidden units. When x (t) is an opening bracket, W (i,x) sets i (t) k = 1, so that the bracket can be copied to the top of the stack.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 7, |
|
"text": "(i,t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 12, |
|
"end": 17, |
|
"text": "(i,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 20, |
|
"end": 23, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 28, |
|
"end": 32, |
|
"text": "(i,h", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 47, |
|
"end": 50, |
|
"text": "(i)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 53, |
|
"end": 58, |
|
"text": "(i,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 262, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 293, |
|
"text": "(i,x)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (i,x) (i,h) uses a matrix T n \u2208 R n\u00d7n , defined below. Suppose v represents the number s in unary notation: v j is 1 if j \u2264 s and 0 otherwise. T n has the special property that T n v is a one-hot vector for s. Based on this, W (i,h) is defined as follows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 7, |
|
"text": "(i,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 8, |
|
"end": 13, |
|
"text": "(i,h)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 234, |
|
"text": "(i,h)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "= 2 \uf8ee \uf8f0 0 0 \u22121 \u22121 1 1 0 0 0 \uf8f9 \uf8fb W", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (i,h) = 2 \uf8ee \uf8ef \uf8ef \uf8f0 0 (T k ) :k\u22121,: 0 (T k ) :k\u22121,: 0 0 \uf8f9 \uf8fa \uf8fa \uf8fb W (i,h)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ":k\u22121,k+1:2k contains T k , with the last row truncated. This portion of the matrix converts h (t\u22121) k+1:2k , which contains a unary encoding of the stack height, to a one-hot vector marking the position of the top of the stack. This ensures that, when pushing to the stack, the top stack item from time t \u2212 1 is only copied to the appropriate position of h (t) :k\u22121 . The other copy of T k , again with the last row omitted, occurs in W (i,h) k+2:2k,k+1:2k . This copy of T k ensures that when the unary counter for the stack height is incremented, only the appropriate position is updated. Finally, the bias vector ensures that the top stack item and the empty stack indicator are always updated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 360, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 442, |
|
"text": "(i,h)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "b (i) = \uf8ee \uf8ef \uf8ef \uf8f0 \u22121 1 \u22121 1 \uf8f9 \uf8fa \uf8fa \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
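|
{ |
|
"text": "The construction above only states the defining property of the matrix T_n (it turns a unary count vector into a one-hot vector marking the count). One concrete matrix with this property, given here as an assumption rather than necessarily the authors' exact choice, is the identity with −1 on the superdiagonal, so that (T_n v)_i = v_i − v_{i+1}:\n\nimport numpy as np\n\ndef T(n):\n    # Identity with -1 on the superdiagonal: (T v)_i = v_i - v_(i+1).\n    return np.eye(n) - np.eye(n, k=1)\n\nn = 5\nfor s in range(n + 1):\n    v = (np.arange(n) < s).astype(float)   # unary encoding of s: s leading ones\n    print(s, (T(n) @ v).astype(int))       # one-hot at position s (1-indexed); all zeros for s = 0", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |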
|
{ |
|
"text": "The forget gate is responsible for deleting portions of memory when stack items are popped.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "f (t) = \u03c3 ( m (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "z (f,t) ))", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 7, |
|
"text": "(f,t)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "z (f,t) = W (f,x) x (t) + W (f,h) h (t\u22121) + b (f ) W (f,x) first ensures that no stack items are deleted when an item is pushed to the stack.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 7, |
|
"text": "(f,t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 12, |
|
"end": 17, |
|
"text": "(f,x)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 20, |
|
"end": 23, |
|
"text": "(t)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 46, |
|
"end": 50, |
|
"text": "(f )", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 53, |
|
"end": 58, |
|
"text": "(f,x)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (f,x) = 2 \uf8ee \uf8ef \uf8ef \uf8f0 1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 \uf8f9 \uf8fa \uf8fa \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Next, W (f,h) marks the second highest stack position and the top of the unary counter for deletion, in case an item needs to be popped.", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 13, |
|
"text": "(f,h)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W (f,h) = 2 \uf8ee \uf8ef \uf8ef \uf8f0 0 \u2212 (T k ) 2:,: 0 \u2212T k 0 0 \uf8f9 \uf8fa \uf8fa \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, the bias term ensures that the top stack item and empty stack indicator are always cleared.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "b (i) = \uf8ee \uf8ef \uf8ef \uf8f0 1 \u22121 1 \u22121 \uf8f9 \uf8fa \uf8fa \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To complete the construction, we fix the output gate to 1, and have the output layer read the top stack position: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "o (t) = \u03c3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 PDA Network", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also provide code for our models at https:// github.com/yidinghao/whitebox-lstm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use u = 0.5 for the counting task, u = 0.7 for the SP task, and m = 50 for both tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
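|
{ |
|
"text": "As a quick sanity check of these values (a small sketch, not part of the released code), note that m = 50 drives the gates to numerical saturation, while the increments v = tanh(u) stay well above the decision threshold v/2 used by the output layers:\n\nimport numpy as np\n\nsigma = lambda z: 1.0 / (1.0 + np.exp(-z))\nprint(sigma(50.0))                 # 1.0 in float64: gates fixed to 1 are numerically 1\nprint(sigma(-50.0))                # about 2e-22: gates fixed to 0 are numerically 0\nprint(np.tanh(0.5), np.tanh(0.7))  # v for the counting and SP networks\nprint(np.tanh(1.0))                # v of about 0.762 for the FSA network (footnote 6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |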
|
{ |
|
"text": "We use \u03b5 = 0.001.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Although it is difficult to see, IG assigns a small positive score to the bs in heatmaps #7 and #8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We do not consider the counting task because its heatmaps are already easy to understand, and we do not consider the PDA network because the gradient-based methods fail to produce nonzero heatmaps for that network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "I would like to thank Dana Angluin and Robert Frank for their advice and mentorship on this project. I would also like to thank Yoav Goldberg, John Lafferty, Tal Linzen, R. Thomas Mc-Coy, Aaron Mueller, Karl Mulligan, Shauli Ravfogel, Jason Shaw, and the reviewers for their helpful feedback and discussion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Sanity Checks for Saliency Maps", |
|
"authors": [ |
|
{ |
|
"first": "Julius", |
|
"middle": [], |
|
"last": "Adebayo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Gilmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Muelly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Goodfellow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Moritz", |
|
"middle": [], |
|
"last": "Hardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "9505--9515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julius Adebayo, Justin Gilmer, Michael Muelly, Ian Goodfellow, Moritz Hardt, and Been Kim. 2018. Sanity Checks for Saliency Maps. In Advances in Neural Information Processing Systems 31, vol- ume 31, pages 9505-9515, Montreal, Canada. Cur- ran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Towards better understanding of gradient-based attribution methods for Deep Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Ancona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enea", |
|
"middle": [], |
|
"last": "Ceolini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cengiz\u00f6ztireli", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ICLR 2018 Conference Track", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Ancona, Enea Ceolini, Cengiz\u00d6ztireli, and Markus Gross. 2018. Towards better understanding of gradient-based attribution methods for Deep Neu- ral Networks. In ICLR 2018 Conference Track, Van- couver, Canada. OpenReview.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "What is relevant in a text document?\": An interpretable machine learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franziska", |
|
"middle": [], |
|
"last": "Horn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "PLOS ONE", |
|
"volume": "12", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0181142" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Arras, Franziska Horn, Gr\u00e9goire Montavon, Klaus-Robert M\u00fcller, and Wojciech Samek. 2017a. \"What is relevant in a text document?\": An inter- pretable machine learning approach. PLOS ONE, 12(8):e0181142.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Explaining Recurrent Neural Network Predictions in Sentiment Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "159--168", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5221" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Arras, Gr\u00e9goire Montavon, Klaus-Robert M\u00fcller, and Wojciech Samek. 2017b. Explaining Recurrent Neural Network Predictions in Sentiment Analysis. In Proceedings of the 8th Workshop on Computa- tional Approaches to Subjectivity, Sentiment and So- cial Media Analysis, pages 159-168, Copenhagen, Denmark. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Evaluating Recurrent Neural Network Explanations", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Osman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "113--126", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Arras, Ahmed Osman, Klaus-Robert M\u00fcller, and Wojciech Samek. 2019. Evaluating Recurrent Neu- ral Network Explanations. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 113- 126, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Subregular Complexity and Deep Learning", |
|
"authors": [ |
|
{ |
|
"first": "Enes", |
|
"middle": [], |
|
"last": "Avcu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chihiro", |
|
"middle": [], |
|
"last": "Shibata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Heinz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Conference on Logic and Machine Learning in Natural Language", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "20--33", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Enes Avcu, Chihiro Shibata, and Jeffrey Heinz. 2017. Subregular Complexity and Deep Learning. In Proceedings of the Conference on Logic and Ma- chine Learning in Natural Language (LaML 2017), Gothenburg, 12-13 June 2017, volume 1 of CLASP Papers in Computational Linguistics, pages 20-33, Gothenburg, Sweden. Centre for Linguistic Theory and Studies in Probability (CLASP), University of Gothenburg.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "On Pixel-Wise Explanations for Non-Linear Classifier Decisions by Layer-Wise Relevance Propagation", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Binder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederick", |
|
"middle": [], |
|
"last": "Klauschen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "PLOS ONE", |
|
"volume": "10", |
|
"issue": "7", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0130140" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Bach, Alexander Binder, Gr\u00e9goire Mon- tavon, Frederick Klauschen, Klaus-Robert M\u00fcller, and Wojciech Samek. 2015. On Pixel-Wise Ex- planations for Non-Linear Classifier Decisions by Layer-Wise Relevance Propagation. PLOS ONE, 10(7):e0130140.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Can Recurrent Neural Networks Learn Nested Recursion? Linguistic Issues in Language Technology", |
|
"authors": [ |
|
{ |
|
"first": "Jean-Philippe", |
|
"middle": [], |
|
"last": "Bernardy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "1--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jean-Philippe Bernardy. 2018. Can Recurrent Neural Networks Learn Nested Recursion? Linguistic Is- sues in Language Technology, 16(1):1-20.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The Algebraic Theory of Context-Free Languages", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Chomsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Sch\u00fctzenberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1959, |
|
"venue": "Studies in Logic and the Foundations of Mathematics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "118--161", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/S0049-237X(09)70104-1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Chomsky and M. P. Sch\u00fctzenberger. 1959. The Algebraic Theory of Context-Free Languages. In P. Braffort and D. Hirschberg, editors, Studies in Logic and the Foundations of Mathematics, vol- ume 26 of Computer Programming and Formal Systems, pages 118-161. North-Holland Publishing Company, Amsterdam, Netherlands.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Finding Structure in Time", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Elman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Cognitive Science", |
|
"volume": "14", |
|
"issue": "2", |
|
"pages": "179--211", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/0364-0213(90)90002-E" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey L. Elman. 1990. Finding Structure in Time. Cognitive Science, 14(2):179-211.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Turing Machines with Restricted Memory Access", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Patrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fischer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1966, |
|
"venue": "Information and Control", |
|
"volume": "9", |
|
"issue": "4", |
|
"pages": "364--379", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/S0019-9958(66)80003-7" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick C. Fischer. 1966. Turing Machines with Re- stricted Memory Access. Information and Control, 9(4):364-379.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Counter Machines and Counter Languages. Mathematical systems theory", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Patrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Albert", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Fischer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arnold", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rosenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1968, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "265--283", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/BF01694011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick C. Fischer, Albert R. Meyer, and Arnold L. Rosenberg. 1968. Counter Machines and Counter Languages. Mathematical systems theory, 2(3):265- 283.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Interpretation of Neural Networks Is Fragile", |
|
"authors": [ |
|
{ |
|
"first": "Amirata", |
|
"middle": [], |
|
"last": "Ghorbani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abubakar", |
|
"middle": [], |
|
"last": "Abid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "3681--3688", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v33i01.33013681" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amirata Ghorbani, Abubakar Abid, and James Zou. 2019. Interpretation of Neural Networks Is Fragile. Proceedings of the AAAI Conference on Artificial In- telligence, 33(01):3681-3688.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Inductive Learning of Phonotactic Patterns", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey Nicholas", |
|
"middle": [], |
|
"last": "Heinz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Nicholas Heinz. 2007. Inductive Learning of Phonotactic Patterns. PhD Dissertation, University of California, Los Angeles, Los Angeles, CA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Interpretability Beyond Feature Attribution: Quantitative Testing with Concept Activation Vectors (TCAV)", |
|
"authors": [ |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wattenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Gilmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carrie", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Wexler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernanda", |
|
"middle": [], |
|
"last": "Viegas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rory", |
|
"middle": [], |
|
"last": "Sayres", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "2668--2677", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Been Kim, Martin Wattenberg, Justin Gilmer, Car- rie Cai, James Wexler, Fernanda Viegas, and Rory Sayres. 2018. Interpretability Beyond Feature At- tribution: Quantitative Testing with Concept Acti- vation Vectors (TCAV). In International Confer- ence on Machine Learning, 10-15 July 2018, Stock- holmsm\u00e4ssan, Stockholm Sweden, volume 80 of Proceedings of Machine Learning Research, pages 2668-2677, Stockholm, Sweden. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The (Un)reliability of Saliency Methods", |
|
"authors": [ |
|
{ |
|
"first": "Pieter-Jan", |
|
"middle": [], |
|
"last": "Kindermans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Hooker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julius", |
|
"middle": [], |
|
"last": "Adebayo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Alber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kristof", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Sch\u00fctt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "D\u00e4hne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Explainable AI: Interpreting, Explaining and Visualizing Deep Learning, number 11700 in Lecture Notes in Computer Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "267--280", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-28954-6_14" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pieter-Jan Kindermans, Sara Hooker, Julius Ade- bayo, Maximilian Alber, Kristof T. Sch\u00fctt, Sven D\u00e4hne, Dumitru Erhan, and Been Kim. 2019. The (Un)reliability of Saliency Methods. In Woj- ciech Samek, Gr\u00e9goire Montavon, Andrea Vedaldi, Lars Kai Hansen, and Klaus-Robert M\u00fcller, edi- tors, Explainable AI: Interpreting, Explaining and Visualizing Deep Learning, number 11700 in Lec- ture Notes in Computer Science, pages 267-280.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "On the Computational Power of RNNs. Computing Research Repository", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Korsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Berwick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.06349" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel A. Korsky and Robert C. Berwick. 2019. On the Computational Power of RNNs. Computing Re- search Repository, arXiv:1906.06349.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Visualizing and Understanding Neural Models in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinlei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "681--691", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1082" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Xinlei Chen, Eduard Hovy, and Dan Juraf- sky. 2016. Visualizing and Understanding Neural Models in NLP. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 681-691, San Diego, CA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Multi-Element Long Distance Dependencies: Using SP k Languages to Explore the Characteristics of Long-Distance Dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Abhijit", |
|
"middle": [], |
|
"last": "Mahalunkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Kelleher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "34--43", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-3904" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijit Mahalunkar and John Kelleher. 2019a. Multi- Element Long Distance Dependencies: Using SP k Languages to Explore the Characteristics of Long- Distance Dependencies. In Proceedings of the Work- shop on Deep Learning and Formal Languages: Building Bridges, pages 34-43, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Using Regular Languages to Explore the Representational Capacity of Recurrent Neural Architectures", |
|
"authors": [ |
|
{ |
|
"first": "Abhijit", |
|
"middle": [], |
|
"last": "Mahalunkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Kelleher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-01424-7_19" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijit Mahalunkar and John D. Kelleher. 2018. Us- ing Regular Languages to Explore the Representa- tional Capacity of Recurrent Neural Architectures.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Understanding Recurrent Neural Architectures by Analyzing and Synthesizing Long Distance Dependencies in Benchmark Sequential Datasets", |
|
"authors": [ |
|
{ |
|
"first": "Abhijit", |
|
"middle": [], |
|
"last": "Mahalunkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Kelleher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Computing Research Repository", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.02966v3" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijit Mahalunkar and John D. Kelleher. 2019b. Un- derstanding Recurrent Neural Architectures by An- alyzing and Synthesizing Long Distance Dependen- cies in Benchmark Sequential Datasets. Computing Research Repository, arXiv:1810.02966v3.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Sequential Neural Networks as Automata", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Merrill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--13", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-3901" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Merrill. 2019. Sequential Neural Networks as Automata. In Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges, pages 1-13, Florence, Italy. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Gradient-Based Vs", |
|
"authors": [ |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-28954-6_13" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gr\u00e9goire Montavon. 2019. Gradient-Based Vs.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Propagation-Based Explanations: An Axiomatic Comparison", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Explainable AI: Interpreting, Explaining and Visualizing Deep Learning, number 11700 in Lecture Notes in Computer Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "253--265", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-28954-6_13" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Propagation-Based Explanations: An Axiomatic Comparison. In Wojciech Samek, Gr\u00e9goire Mon- tavon, Andrea Vedaldi, Lars Kai Hansen, and Klaus- Robert M\u00fcller, editors, Explainable AI: Interpreting, Explaining and Visualizing Deep Learning, number 11700 in Lecture Notes in Computer Science, pages 253-265. Springer International Publishing, Cham, Switzerland.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Methods for interpreting and understanding deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Digital Signal Processing", |
|
"volume": "73", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.dsp.2017.10.011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gr\u00e9goire Montavon, Wojciech Samek, and Klaus- Robert M\u00fcller. 2018. Methods for interpreting and understanding deep neural networks. Digital Signal Processing, 73:1-15.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Beyond Word Importance: Contextual Decomposition to Extract Interactions from LSTMs", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Murdoch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ICLR 2018 Conference Track", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "W. James Murdoch, Peter J. Liu, and Bin Yu. 2018. Beyond Word Importance: Contextual Decomposi- tion to Extract Interactions from LSTMs. In ICLR 2018 Conference Track, Vancouver, Canada. Open- Review.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Activation Differences. Computing Research Repository", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1605.01713" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Activation Differences. Computing Research Repos- itory, arXiv:1605.01713.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Vedaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ICLR 2014 Workshop Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan, Andrea Vedaldi, and Andrew Zis- serman. 2014. Deep Inside Convolutional Net- works: Visualising Image Classification Models and Saliency Maps. In ICLR 2014 Workshop Proceed- ings, Banff, Canada. arXiv.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Closing Brackets with Recurrent Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Skachkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Trost", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dietrich", |
|
"middle": [], |
|
"last": "Klakow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "232--239", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5425" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Natalia Skachkova, Thomas Trost, and Dietrich Klakow. 2018. Closing Brackets with Recurrent Neural Networks. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and In- terpreting Neural Networks for NLP, pages 232-239, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Axiomatic Attribution for Deep Networks", |
|
"authors": [ |
|
{ |
|
"first": "Mukund", |
|
"middle": [], |
|
"last": "Sundararajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Taly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiqi", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 34th International Conference on Machine Learning", |
|
"volume": "70", |
|
"issue": "", |
|
"pages": "3319--3328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic Attribution for Deep Networks. In Pro- ceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Ma- chine Learning Research, pages 3319-3328, Sydney, Australia. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "On the Practical Computational Power of Finite Precision RNNs for Language Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Gail", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eran", |
|
"middle": [], |
|
"last": "Yahav", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "740--745", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2117" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gail Weiss, Yoav Goldberg, and Eran Yahav. 2018. On the Practical Computational Power of Finite Preci- sion RNNs for Language Recognition. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics, volume 2: Short Pa- pers, pages 740-745, Melbourne, Australia. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Benchmarking Attribution Methods with Relative Feature Importance. Computing Research Repository", |
|
"authors": [ |
|
{ |
|
"first": "Mengjiao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.09701" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mengjiao Yang and Been Kim. 2019. Bench- marking Attribution Methods with Relative Fea- ture Importance. Computing Research Repository, arXiv:1907.09701.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Learning the Dyck Language with Attention-based Seq2Seq Models", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Thang" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "138--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Yu, Ngoc Thang Vu, and Jonas Kuhn. 2019. Learning the Dyck Language with Attention-based Seq2Seq Models. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 138-146, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Visualizing and Understanding Convolutional Networks", |
|
"authors": [ |
|
{

"first": "Matthew",

"middle": [

"D"

],

"last": "Zeiler",

"suffix": ""

},

{

"first": "Rob",

"middle": [],

"last": "Fergus",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Computer Vision -ECCV 2014", |
|
"volume": "8689", |
|
"issue": "", |
|
"pages": "818--833", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-319-10590-1_53" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew D. Zeiler and Rob Fergus. 2014. Visualiz- ing and Understanding Convolutional Networks. In Computer Vision -ECCV 2014, volume 8689 of Lecture Notes in Computer Science, pages 818-833, Zurich, Switzerland. Springer International Publish- ing.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Top-Down Neural Attention by Excitation Backprop", |
|
"authors": [ |
|
{ |
|
"first": "Jianming", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [ |
|
"Adel" |
|
], |
|
"last": "Bargal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Brandt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohui", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stan", |
|
"middle": [], |
|
"last": "Sclaroff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Journal of Computer Vision", |
|
"volume": "126", |
|
"issue": "10", |
|
"pages": "1084--1102", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s11263-017-1059-x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianming Zhang, Sarah Adel Bargal, Zhe Lin, Jonathan Brandt, Xiaohui Shen, and Stan Sclaroff. 2018. Top-Down Neural Attention by Excitation Back- prop. International Journal of Computer Vision, 126(10):1084-1102.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The string [([] requires a prediction of ), since the ( is the last unclosed bracket. Similarly, (()[ requires a prediction of ]. Strings with no unclosed brackets, such as [()], require a prediction of None." |
|
}, |
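The FIGREF0 caption above states the labeling rule for the closing-bracket prediction task: the target for a bracket string is the closer of its last unclosed bracket, or None when every bracket is closed. Below is a minimal stack-based sketch of that rule; the function name expected_prediction and the CLOSERS map are illustrative choices, not identifiers from the paper.

CLOSERS = {"(": ")", "[": "]"}

def expected_prediction(string):
    # Scan left to right, pushing each opening bracket and popping one opener
    # for each closing bracket; the top of the stack is the last unclosed bracket.
    stack = []
    for symbol in string:
        if symbol in CLOSERS:
            stack.append(symbol)
        else:
            stack.pop()
    return CLOSERS[stack[-1]] if stack else None

# The caption's three examples serve as usage checks.
assert expected_prediction("[([]") == ")"
assert expected_prediction("(()[") == "]"
assert expected_prediction("[()]") is None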
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "). Red represents positive values and blue represents negative values. Heatmaps with all values within the range of \u00b11 \u00d7 10 \u22125 are shown as all 0s." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "contains the height of the stack in unary notation" |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "values \u00b12 k are determined by equation (1) in Subsection 4.2. Second, it sets g (t)" |
|
}, |
|
"FIGREF6": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "2k+1,:x (t) + W (c,h) 2k+1,: h (t\u22121) + b (c) 2k+1" |
|
}, |
|
"FIGREF8": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "and j = k \u22121, c = ] and j = k 1, c = None and j = 2k + 1 0, otherwise." |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Definitions of the gradient-based methods.", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table><tr><td>(True) t</td><td>(accb)</td></tr><tr><td colspan=\"2\">for the counter-based SP network, with 0.6 \u2264 u \u2264 64.</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Gradient-based heatmaps of R", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">m \u03c3(m)</td><td>c (t)</td><td colspan=\"2\">Accuracy % Blank</td></tr><tr><td>4</td><td colspan=\"2\">0.982 \u22128.74 \u00d7 10 \u22123</td><td>90.1</td><td>0.2</td></tr><tr><td>5</td><td colspan=\"2\">0.993 \u22123.48 \u00d7 10 \u22123</td><td>96.1</td><td>2.2</td></tr><tr><td>6</td><td colspan=\"2\">0.998 \u22121.32 \u00d7 10 \u22123</td><td>99.8</td><td>6.5</td></tr><tr><td>7</td><td colspan=\"2\">0.999 \u22124.91 \u00d7 10 \u22124</td><td>100.0</td><td>22.0</td></tr><tr><td>8</td><td colspan=\"2\">1.000 \u22121.81 \u00d7 10 \u22124</td><td>100.0</td><td>42.1</td></tr><tr><td>9</td><td colspan=\"2\">1.000 \u22126.68 \u00d7 10 \u22125</td><td>100.0</td><td>69.9</td></tr><tr><td colspan=\"3\">10 1.000 \u22122.46 \u00d7 10 \u22125</td><td>100.0</td><td>92.3</td></tr><tr><td colspan=\"3\">11 1.000 \u22129.05 \u00d7 10 \u22126</td><td/></tr></table>", |
|
"type_str": "table", |
|
"text": "To test how attribution methods are affected", |
|
"html": null |
|
} |
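The \u03c3(m) column of TABREF4 matches the standard logistic sigmoid evaluated at the integer m; the sketch below reproduces that column under this assumption. The Accuracy and % Blank columns are empirical measurements and cannot be recomputed from m alone.

import math

def sigmoid(x):
    # Standard logistic sigmoid.
    return 1.0 / (1.0 + math.exp(-x))

for m in range(4, 12):
    # Reproduces the sigma(m) column: 0.982, 0.993, 0.998, 0.999, then 1.000 from m = 8 on.
    print(f"{m}\t{sigmoid(m):.3f}")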
|
} |
|
} |
|
} |