|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:58:46.211407Z" |
|
}, |
|
"title": "Syntactic Perturbations Reveal Representational Correlates of Hierarchical Phrase Structure in Pretrained Language Models", |
|
"authors": [ |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Alleman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Columbia University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Mamou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Intel Labs MIT-IBM", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [ |
|
"A Del" |
|
], |
|
"last": "Rio", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hanlin", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Intel Labs MIT-IBM", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sueyeon", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Columbia University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While vector-based language representations from pretrained language models have set a new standard for many NLP tasks, there is not yet a complete accounting of their inner workings. In particular, it is not entirely clear what aspects of sentence-level syntax are captured by these representations, nor how (if at all) they are built along the stacked layers of the network. In this paper, we aim to address such questions with a general class of interventional, input perturbation-based analyses of representations from pretrained language models. Importing from computational and cognitive neuroscience the notion of representational invariance, we perform a series of probes designed to test the sensitivity of these representations to several kinds of structure in sentences. Each probe involves swapping words in a sentence and comparing the representations from perturbed sentences against the original. We experiment with three different perturbations: (1) random permutations of n-grams of varying width, to test the scale at which a representation is sensitive to word position; (2) swapping of two spans which do or do not form a syntactic phrase, to test sensitivity to global phrase structure; and (3) swapping of two adjacent words which do or do not break apart a syntactic phrase, to test sensitivity to local phrase structure. Results from these probes collectively suggest that Transformers build sensitivity to larger parts of the sentence along their layers, and that hierarchical phrase structure plays a role in this process. More broadly, our results also indicate that structured input perturbations widens the scope of analyses that can be performed on often-opaque deep learning systems, and can serve as a complement to existing tools (such as supervised linear probes) for interpreting complex black-box models. 1 1 Datasets, extracted features and code will be publicly available upon publication.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While vector-based language representations from pretrained language models have set a new standard for many NLP tasks, there is not yet a complete accounting of their inner workings. In particular, it is not entirely clear what aspects of sentence-level syntax are captured by these representations, nor how (if at all) they are built along the stacked layers of the network. In this paper, we aim to address such questions with a general class of interventional, input perturbation-based analyses of representations from pretrained language models. Importing from computational and cognitive neuroscience the notion of representational invariance, we perform a series of probes designed to test the sensitivity of these representations to several kinds of structure in sentences. Each probe involves swapping words in a sentence and comparing the representations from perturbed sentences against the original. We experiment with three different perturbations: (1) random permutations of n-grams of varying width, to test the scale at which a representation is sensitive to word position; (2) swapping of two spans which do or do not form a syntactic phrase, to test sensitivity to global phrase structure; and (3) swapping of two adjacent words which do or do not break apart a syntactic phrase, to test sensitivity to local phrase structure. Results from these probes collectively suggest that Transformers build sensitivity to larger parts of the sentence along their layers, and that hierarchical phrase structure plays a role in this process. More broadly, our results also indicate that structured input perturbations widens the scope of analyses that can be performed on often-opaque deep learning systems, and can serve as a complement to existing tools (such as supervised linear probes) for interpreting complex black-box models. 1 1 Datasets, extracted features and code will be publicly available upon publication.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "It is still unknown how distributed information processing systems encode and exploit complex relational structures in data, despite their ubiquitous use in the modern world. The fields of deep learning (Saxe et al., 2013; Hewitt and Manning, 2019) , neuroscience (Sarafyazd and Jazayeri, 2019; Stachenfeld et al., 2017) , and cognitive science (Elman, 1991; Kemp and Tenenbaum, 2008; Tervo et al., 2016) have given great attention to this question, including a productive focus on the potential models and their implementations of hierarchical tasks, such as predictive maps and graphs. In this work, we provide a generic means of identifying input structures that deep language models use to \"chunk up\" vastly complex data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 222, |
|
"text": "(Saxe et al., 2013;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 248, |
|
"text": "Hewitt and Manning, 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 294, |
|
"text": "(Sarafyazd and Jazayeri, 2019;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 320, |
|
"text": "Stachenfeld et al., 2017)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 358, |
|
"text": "(Elman, 1991;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 384, |
|
"text": "Kemp and Tenenbaum, 2008;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 404, |
|
"text": "Tervo et al., 2016)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Natural (human) language provides a rich domain for studying how complex hierarchical structures are encoded in information processing systems. More so than other domains, human language is unique in that its underlying hierarchy has been extensively studied and theorized in linguistics, which provides source of \"ground truth\" structures for stimulus data. Much prior work on characterizing the types of linguistic information encoded in computational models of language such as neural networks has focused on supervised readout probes, which train a classifier on top pretrained models to predict a particular linguistic label (Belinkov and Glass, 2017; Liu et al., 2019a; Tenney et al., 2019) . In particular, Hewitt and Manning (2019) apply probes to discover linear subspaces that encode tree-distances as distances in the representational subspace, and show that these distances can be used even without any labeled information to induce hierarchical structure. However, recent work has highlighted issues with correlating supervised probe performance with the amount of language structure encoded in such representations (Hewitt and Liang, 2019) . Another popular approach to analyzing deep models is through the lens of geometry (Reif et al., 2019; Gigante et al., 2019) . While geometric interpretations provide significant insights, they present another challenge in summarizing the structure in a quantifiable way. More recent techniques such as replica-based mean field manifold analysis method (Chung et al., 2018; Cohen et al., 2019; Mamou et al., 2020) connects representation geometry with linear classification performance, but the method is limited to categorization tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 630, |
|
"end": 656, |
|
"text": "(Belinkov and Glass, 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 675, |
|
"text": "Liu et al., 2019a;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 696, |
|
"text": "Tenney et al., 2019)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 714, |
|
"end": 739, |
|
"text": "Hewitt and Manning (2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1129, |
|
"end": 1153, |
|
"text": "(Hewitt and Liang, 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1238, |
|
"end": 1257, |
|
"text": "(Reif et al., 2019;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1258, |
|
"end": 1279, |
|
"text": "Gigante et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1508, |
|
"end": 1528, |
|
"text": "(Chung et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1529, |
|
"end": 1548, |
|
"text": "Cohen et al., 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1549, |
|
"end": 1568, |
|
"text": "Mamou et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we make use of an experimental framework from cognitive science and neuroscience to probe for hierarchical structure in contextual representations from pretrained Transformer models (i.e., BERT (Devlin et al., 2018) and its variants). A popular technique in neuroscience involves measuring change in the population activity in response to controlled, input perturbations (Mollica et al., 2020; Ding et al., 2016) . We apply this approach to test the characteristic scale and the complexity ( Fig. 1 ) of hierarchical phrase structure encoded deep contextual representations, and present several key findings:", |
|
"cite_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 229, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 407, |
|
"text": "(Mollica et al., 2020;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 408, |
|
"end": 426, |
|
"text": "Ding et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 506, |
|
"end": 512, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. Representations are distorted by shuffling small n-grams in early layers, while the distortion caused by shuffling large n-grams starts to occur in later layers, implying the scale of characteristic word length increases from input to downstream layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2. Representational distortion caused by swapping two constituent phrases is smaller than when the control sequences of the same length are swapped, indicating that the BERT representations are sensitive to hierarchical phrase structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "3. Representational distortion caused by swapping adjacent words across phrasal boundary is larger than when the swap is within a phrasal boundary; furthermore, the amount of distortion increases with the syntactic distance between the swapped words. The correlation between distortion and tree distance increases across the layers, suggesting that the characteristic complexity of phrasal subtrees increases across the layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "4. Early layers pay more attention between syntactically closer adjacent pairs and deeper layers pay more attention between syntactically distant adjacent pairs. The attention paid in each layer can explain some of the emergent sensitivity to phrasal structure across layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our work demonstrates that interventional tools such as controlled input perturbations can be useful for analyzing deep networks, adding to the growing, interdisciplinary body of work which profitably adapt experimental techniques from cognitive neuroscience and psycholinguistics to analyze computational models of language (Futrell et al., 2018; Ettinger, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 347, |
|
"text": "(Futrell et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 363, |
|
"text": "Ettinger, 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Eliciting changes in behavioral and neural responses through controlled input perturbations is a common experimental technique in cognitive neuroscience and psycholinguistics (Tsao and Livingstone, 2008; Mollica et al., 2020) . Inspired by these approaches, we perturb input sentences and measure the discrepancy between the resulting, perturbed representation against the original. While conceptually simple, this approach allows for a targeted analysis of internal representations obtained from different layers of deep models, and can suggest partial mechanisms by which such models are able to encode linguistic structure. We note that sentence perturbations have been primarily utilized in NLP for representation learning (Hill et al., 2016; Artetxe et al., 2018; Lample et al., 2018) , data augmentation (Wang et al., 2018; Andreas, 2020) , and testing for model robustness (e.g., against adversarial examples) (Jia and Liang, 2017; Belinkov and Bisk, 2018) . A methodological contribution of our work is to show that input perturbations can complement existing tools and widens the scope of questions that could be asked of representations learned by deep networks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 203, |
|
"text": "(Tsao and Livingstone, 2008;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 225, |
|
"text": "Mollica et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 746, |
|
"text": "(Hill et al., 2016;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 768, |
|
"text": "Artetxe et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 769, |
|
"end": 789, |
|
"text": "Lample et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 810, |
|
"end": 829, |
|
"text": "(Wang et al., 2018;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 830, |
|
"end": 844, |
|
"text": "Andreas, 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 917, |
|
"end": 938, |
|
"text": "(Jia and Liang, 2017;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 939, |
|
"end": 963, |
|
"text": "Belinkov and Bisk, 2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this work we consider three different types of sentence perturbations designed to probe for different phenomena.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence perturbations", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "n-gram shuffling In the n-gram shuffling experiments, we randomly shuffle the words of a sentence in units of n-grams, with n varying from 1 (i.e., individual words) to 7 (see Fig. 2a for an example). While the number of words which change absolute position is similar for different n, larger n will better preserve the local context (i.e., relative position) of more words. Thus, we reason that n-gram swaps affect the representations selective to the context with size n or higher within the sentence, and that lower n will result in greater distortion in sentence representations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 183, |
|
"text": "Fig. 2a", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence perturbations", |
|
"sec_num": "2.1" |
|
}, |
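
The n-gram shuffling procedure (also detailed in Appendix A.3) can be illustrated with a minimal sketch. The function below is hypothetical and not the paper's released code; it follows the stated procedure: split the sentence left-to-right into non-overlapping n-grams (with a shorter remainder chunk if the length is not a multiple of n) and shuffle the chunks.

```python
import random

def ngram_shuffle(words, n, seed=None):
    """Split `words` into consecutive n-grams (left to right) and shuffle the chunks.

    The final chunk may be shorter than n if len(words) is not a multiple of n.
    With n=1 this reduces to a full random shuffle of the words.
    """
    rng = random.Random(seed)
    chunks = [words[i:i + n] for i in range(0, len(words), n)]
    rng.shuffle(chunks)
    return [w for chunk in chunks for w in chunk]

# Example usage on a PTB-style sentence
sentence = "The market 's pessimism reflects the gloomy outlook in Detroit".split()
print(ngram_shuffle(sentence, n=2, seed=0))
```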
|
{ |
|
"text": "Phrase swaps The n-gram shuffling experiments probe for sensitivity of representations to local context without taking into account syntactic structure. In the phrase swap experiments, we perturb a sentence by swapping two randomly chosen spans. We explore two ways of swapping spans. In the first setting, the spans are chosen such that they are valid phrases according to its parse tree. 3 In the second setting, the spans are chosen that they are invalid phrases. Importantly, in the second, control setting, we fix the length of the spans such that the lengths of spans that are chosen to be swapped are the same as in the first setting (see Fig. 3a for an example). We hypothesize that swapping invalid phrases will result in more distortion than swapping valid phrases, since invalid swaps will result in greater denigration of syntactic structure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 391, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 646, |
|
"end": 653, |
|
"text": "Fig. 3a", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence perturbations", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Adjacent word swaps In the adjacent word swapping experiments, we swap two adjacent words in a sentence. We again experiment with two settings -in the first setting, the swapped words stay within the phrase boundary (i.e., the two words share the same parent), while in the second setting, the swapped words cross phrase boundaries. We also perform a more fine-grained analysis where we condition the swaps based on the \"syntactic distance\" between the swapped words, where syntactic distance is defined as the distance between the two words in the parse tree (see Fig. 6c ). Since a phrase corresponds to a subtree of the parse tree, this distance also quantifies the number of nested phrase boundaries between two adjacent words. Here, we expect the amount of distortion to be positively correlated with the syntactic distance of the words that are swapped.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 565, |
|
"end": 572, |
|
"text": "Fig. 6c", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence perturbations", |
|
"sec_num": "2.1" |
|
}, |
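
As an illustration of how the "syntactic distance" between adjacent words can be computed from a constituency parse, the sketch below uses NLTK's Tree class; the helper names are hypothetical and this is not the paper's code. The distance counts edges between the two words' POS nodes, so two words sharing a parent are at distance 2, and the distance grows with the number of nested phrase boundaries crossed.

```python
from nltk import Tree

def adjacent_tree_distance(tree, i):
    """Edges between the POS nodes of words i and i+1 in a constituency tree.

    Two words inside the same minimal phrase (same parent) are at distance 2;
    the distance grows with the number of phrase boundaries between them.
    """
    # tree positions of the preterminal (POS tag) nodes dominating each word
    p1 = tree.leaf_treeposition(i)[:-1]
    p2 = tree.leaf_treeposition(i + 1)[:-1]
    common = 0
    for a, b in zip(p1, p2):
        if a != b:
            break
        common += 1
    return (len(p1) - common) + (len(p2) - common)

def swap_adjacent(words, i):
    """Return a copy of `words` with positions i and i+1 swapped."""
    out = list(words)
    out[i], out[i + 1] = out[i + 1], out[i]
    return out

# Toy example parse
t = Tree.fromstring("(S (NP (DT The) (NNS dogs)) (VP (VBP bark) (ADVP (RB loudly))))")
print(adjacent_tree_distance(t, 0))  # The <-> dogs: same NP, distance 2
print(adjacent_tree_distance(t, 1))  # dogs <-> bark: crosses the NP/VP boundary, distance 4
```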
|
{ |
|
"text": "Transformers For our sentence representation, we focus on the Transformer-family of models pretrained on largescale language datasets (BERT and its variants). Given an input word embedding matrix X \u2208 R T \u00d7d for a sentence of length T , the Transformer applies self attention over the previous layer's representation to produce a new representation,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual representations from", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "X l = f l ([H l,1 , . . . , H l,H ]), H l,i = A l,i X l\u22121 V l,i , A l,i = softmax (X l\u22121 Q l,i )(X l\u22121 K l,i ) \u221a d k ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Contextual representations from", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where f l is an MLP layer, H is the number of heads,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual representations from", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "d H = d", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual representations from", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "H is the head embedding dimension, and Q l,i , K l,i , V l,i \u2208 R d\u00d7d k are respectively the learned query, key, and value projection matrices at layer l for head i. The MLP layer consists of a residual layer followed by layer normalization and a nonlinearity. The 0-th layer representation X 0 is obtained by adding the position embeddings and the segment embeddings to the input token embeddings X, and passing it through normalization layer. 4 In this paper, we conduct our distortion analysis mainly on the intermediate Transformer represen", |
|
"cite_spans": [ |
|
{ |
|
"start": 444, |
|
"end": 445, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual representations from", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "- tations X l = [x l,1 , . . . , x l,T ], where x l,t \u2208 R d", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual representations from", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "is the contextualized representation for word t at layer l. 5 We analyze the trend in distortion as a function of layer depth l for the different perturbations. We also explore the different attention heads H l,i \u2208 R T \u00d7d H and the associated attention matrix A l,i \u2208 R T \u00d7T to inspect whether certain attention heads specialize at encoding syntactic information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 61, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual representations from", |
|
"sec_num": "2.2" |
|
}, |
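
As a concrete illustration of Eq. (1), the NumPy sketch below computes a single self-attention head from a previous-layer representation. The weights are random stand-ins (not BERT's parameters), and the MLP / residual / layer-norm step f_l is omitted for brevity; this is an illustrative sketch, not the paper's implementation.

```python
import numpy as np

def attention_head(X_prev, Q, K, V):
    """One self-attention head as in Eq. (1).

    X_prev: (T, d) previous-layer representation.
    Q, K, V: (d, d_k) projection matrices for this head.
    Returns the head output H (T, d_k) and the attention matrix A (T, T).
    """
    d_k = Q.shape[1]
    scores = (X_prev @ Q) @ (X_prev @ K).T / np.sqrt(d_k)     # (T, T)
    A = np.exp(scores - scores.max(axis=-1, keepdims=True))   # row-wise softmax
    A = A / A.sum(axis=-1, keepdims=True)
    H = A @ (X_prev @ V)
    return H, A

# Toy example: T=5 tokens, model dim d=8, head dim d_k=4, random weights.
rng = np.random.default_rng(0)
T, d, d_k = 5, 8, 4
X = rng.normal(size=(T, d))
H, A = attention_head(X, rng.normal(size=(d, d_k)),
                         rng.normal(size=(d, d_k)),
                         rng.normal(size=(d, d_k)))
print(A.sum(axis=-1))  # each row of A sums to 1
```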
|
{ |
|
"text": "Our input manipulations allow us to specify the distortion at the input level, and we wish to measure the corresponding distortion in the representation space ( Fig. 1 ). Due to the attention mechanism, a single vector in an intermediate layer is a function of the representations of (potentially all) the other tokens in the sentence. Therefore, the information about a particular word might be distributed among the many feature vectors of a sentence, and we wish to consider all feature vectors together as a single sentence-level representation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 167, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Distortion metric", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We thus represent each sentence as a matrix and use the distance induced by matrix 2-norm. Specifically, let P \u2208 {0, 1} T \u00d7T be the binary matrix representation of a permutation that perturbs the input sentence, i.e.,X = PX. Further letX l and X l be the corresponding sentence representations for the l-th layer for the perturbed and original sentences. To ignore uniform shifting and scaling, we also z-score each feature dimension of each layer (by subtracting the mean and dividing by the standard deviation where these statistics are obtained from the full Penn Treebank corpus) to giveZ l and Z l . Our distortion metric for layer l is then defined as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distortion metric", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Z l \u2212 P \u22121Z l / \u221a T d,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distortion metric", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where \u2022 is the matrix 2-norm (i.e., Frobenius norm). 6 Importantly, we in-vert the permutation of the perturbed representation to preserve the original ordering, which allows us to measure the distortion due to structural change, rather than distortion due to simple differences in surface form. We divide by", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distortion metric", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "T d to make the metric comparable between sentences (with different T ) and networks (with different d).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u221a", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Intuitively, our metric is the scaled Euclidean distance between the z-scored, flattened sentence representations, z l \u2208 R T d . Because each dimension is independently centered and standardized, the maximally unstructured distribution of z l is an isotropic T d-dimensional Gaussian. The expected distance between two such vectors is approximately \u221a 2T d. Therefore, we can interpret a distortion value approaching \u221a 2 as comparable to if we had randomly redrawn the perturbed representation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u221a", |
|
"sec_num": null |
|
}, |
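
The distortion metric defined above can be summarized with a short sketch. This is illustrative only (not the authors' released code): the helper name is hypothetical, and the z-scoring statistics, which in the paper come from the full Penn Treebank corpus, are simply passed in as arguments here.

```python
import numpy as np

def distortion(X_orig, X_pert, perm, mean, std):
    """Scaled Frobenius distance between z-scored sentence representations.

    X_orig, X_pert: (T, d) layer-l representations of the original and perturbed sentence.
    perm: length-T array with perm[i] = original position of the i-th perturbed token,
          used to undo the reordering before comparing (the P^{-1} step).
    mean, std: (d,) per-dimension statistics (in the paper, estimated on the PTB corpus).
    """
    T, d = X_orig.shape
    Z_orig = (X_orig - mean) / std
    Z_pert = (X_pert - mean) / std
    Z_pert_realigned = np.empty_like(Z_pert)
    Z_pert_realigned[perm] = Z_pert        # invert the permutation
    return np.linalg.norm(Z_orig - Z_pert_realigned) / np.sqrt(T * d)
```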
|
{ |
|
"text": "We apply our perturbation-based analysis on sentences from the English Penn Treebank (Marcus et al., 1994) , where we average the distortion metric across randomly chosen sentences. We analyze the distortion, as measured by length-normalized Frobenius norm between the perturbed and original representations, as a function of layer depth. Layers that experience large distortion when the syntactic structure is disrupted from the perturbation can be interpreted as being more sensitive to hierarchical syntactic structure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 106, |
|
"text": "(Marcus et al., 1994)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "As we found the trend to be largely similar across the different models, in the following section, we primarily discuss results from BERT (bert-base-cased). We replicate key results with other pretrained and randomly-initialized Transformer-based models as well (see A.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "When we shuffle in units of larger n-grams, it only introduces distortions in the deeper BERT layers compared to smaller n-gram shuffles. The n-gram sized shuffles break contexts larger than n, while preserving contexts of size n or smaller. Interestingly, smaller n-gram shuffles diverge from the original sentence in the early layers ( Fig. 2b , top curve), implying that only in early layers are representations built from short-range contexts. Larger n-gram shuffles remain minimally distorted for 'longer' (Fig. 2b , bottom curve), implying that long- range contexts play a larger role deeper layer representations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 345, |
|
"text": "Fig. 2b", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 511, |
|
"end": 519, |
|
"text": "(Fig. 2b", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbation size increases along BERT layers", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Effects of phrasal boundaries Since BERT seems to build larger contexts along its layers, we now ask whether those contexts are structures of some grammatical significance. A basic and important syntactic feature is the constituent phrase, which BERT has previously been shown to represented in some fashion (Goldberg, 2019; . We applied two targeted probes of phrase structure in the BERT representation, and found that phrasal boundaries are indeed influential. If we swap just two n-grams, the BERT representations are less affected when phrases are kept intact. We show this by swapping only two ngrams per sentence and comparing the distortion when those n-grams are phrases to when they cross phrase boundaries (Fig. 3a) , where we control for the length of n-grams that are swapped in both settings. There is less distortion when respecting phrase boundaries, which is evident among all feature vectors, including those in the position of words which did not get swapped (Fig. 2d ). The global contextual information, distributed across the sentence, is affected by the phrase boundary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 324, |
|
"text": "(Goldberg, 2019;", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 717, |
|
"end": 726, |
|
"text": "(Fig. 3a)", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 978, |
|
"end": 986, |
|
"text": "(Fig. 2d", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbation size increases along BERT layers", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To see if the role of a phrase impacts its salience, we distinguish between verb phrases (VP) and noun phrase (NP) swaps. Swapping VP results in more distortion than swapping NP ( Fig. 2e ). Since VP are in general larger than NP, this effect could in principle be due simply to the number of words being swapped. Yet that is not the case: Using a partial linear regression (see details in A.4), we can estimate the difference between the VP and NP distortions conditional on any smooth function of the swap size, and doing this reveals that there is still a strong difference in the intermediate layers (Fig. 2f ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 187, |
|
"text": "Fig. 2e", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 612, |
|
"text": "(Fig. 2f", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbation size increases along BERT layers", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Having seen that representations are sensitive to phrase boundaries, we next explore whether that sensitivity is proportional to the number of phrase boundaries that are broken, which is a quantity related to the phrase hierarchy. Instead of swapping entire phrases, we swap two adjacent words and analyze the distortion based on how far apart the two words are in the constituency tree (Fig. 3a) 7 . This analysis varies the distance in the deeper tree structure while keeping the distance in surface form constant (since we always swap adjacent words). If the hierarchical representations are indeed being gradually built up along the layers of these pretrained models, we expect distortion to be greater for word swaps that are further apart in tree distance. We indeed find that there is a larger distortion when swapping syntactically distant words (Fig. 3b ). This distortion grows from earlier to later BERT layers. Furthermore, when looking at the per-head representations of each layer, we see that in deeper layers there are more heads showing a positive rank correlation between distortion and tree distance (Fig. 3c) . In addition to a sensitivity to phrase boundaries, deeper BERT layers develop a sensitivity to the number of boundaries that are broken.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 387, |
|
"end": 396, |
|
"text": "(Fig. 3a)", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 854, |
|
"end": 862, |
|
"text": "(Fig. 3b", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 1119, |
|
"end": 1128, |
|
"text": "(Fig. 3c)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Controlling for co-occurrence Since words in the same phrase may tend to occur together more often, co-occurrence is a potential confound when assessing the effects of adjacent word swaps. Cooccurrence is a simple statistic which does not require any notion of grammar to compute -indeed it is used to train many non-contextual word embeddings (e.g., word2vec (Mikolov et al., 2013) , GloVe (Pennington et al., 2014) ). So it is natural to ask whether BERT's resilience to syntactically closer swaps goes beyond simple co-occurrence statistics. For simplicity, let us focus on whether a swap occurs within a phrase (tree distance = 2) or not.", |
|
"cite_spans": [ |
|
{ |
|
"start": 360, |
|
"end": 382, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 416, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As an estimate of co-occurrence, we used the pointwise mutual information (PMI). Specifically, for two words w and v, the PMI is", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "log p(w,v) p(w)p(v)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": ", which is estimated from the empirical probabilities. We confirm that adjacent words in the same phrase do indeed have a second mode at high PMI (Fig. 3e) . Dividing the swaps into those whose words have high PMI (above the marginal median) and low PMI (below it), we can see visually that the difference between within-phrase swaps and outof-phrase swaps persists in both groups (Fig. 3f) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 155, |
|
"text": "(Fig. 3e)", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 390, |
|
"text": "(Fig. 3f)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
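
A minimal sketch of how such a PMI estimate could be computed from corpus counts is given below. The helper is hypothetical and uses simple empirical relative frequencies; the paper does not specify its exact estimator.

```python
import math
from collections import Counter

def pmi_table(sentences):
    """Estimate PMI(w, v) = log[p(w, v) / (p(w) p(v))] for adjacent word pairs.

    p(w) is the unigram relative frequency and p(w, v) the relative frequency
    of the adjacent pair (w, v), both estimated from `sentences` (lists of words).
    """
    unigrams, pairs = Counter(), Counter()
    for words in sentences:
        unigrams.update(words)
        pairs.update(zip(words, words[1:]))
    n_uni, n_pair = sum(unigrams.values()), sum(pairs.values())
    return {
        (w, v): math.log((c / n_pair) / ((unigrams[w] / n_uni) * (unigrams[v] / n_uni)))
        for (w, v), c in pairs.items()
    }

# Example usage on a tiny toy corpus
corpus = [["the", "gloomy", "outlook"], ["the", "outlook"], ["gloomy", "outlook"]]
print(pmi_table(corpus)[("gloomy", "outlook")])
```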
|
{ |
|
"text": "When quantitatively accounting for the effect of PMI with a partial linear regression (see A.4), there remains a significant correlation between the breaking of a phrase and the subsequent distortion. This indicates that the greater distortion for word swaps which cross phrase boundaries is not simply due to surface co-occurrence statistics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Relation to linguistic information Do our input perturbations, and the resulting the distortions, reflect changes in the encoding of important linguistic information? One way to address this question, which is popular in computational neuroscience (DiCarlo and Cox, 2007) and more recently BERTology (Liu et al., 2019a; Tenney et al., 2019) , is to see how well a linear classifier trained on a linguistic task generalizes from the (representations of the) unperturbed sentences to the perturbed ones. With supervised probes, we can see how much the representations change with respect to the subspaces that encode specific linguistic information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 271, |
|
"text": "(DiCarlo and Cox, 2007)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 319, |
|
"text": "(Liu et al., 2019a;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 340, |
|
"text": "Tenney et al., 2019)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Specifically, we relate representational distortion to three common linguistic tasks of increasing complexity: part of speech (POS) classification; grandparent tag (GP) classification (Tenney et al., 2019) ; and a parse tree distance reconstruction (Hewitt and Manning, 2019) 8 . The probe trained on each of these tasks is a generalized linear model, mapping a datapoint x (i.e. representations from different layers) to a conditional distribution of the labels, p(y|\u03b8 T x) (see A.5 for model details). Thus a ready measure of the effect of each type of swap, for a single sentence, is log p(y|\u03b8 T x i ) \u2212 log p(y|\u03b8 Tx i ), wherex i is same datum as x i in the perturbed representation 9 . Averaging this quantity over all datapoints gives a measure of content-specific distortion within a representation, which we will call \"inference impairment\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 205, |
|
"text": "(Tenney et al., 2019)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 275, |
|
"text": "(Hewitt and Manning, 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
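
A sketch of this "inference impairment" measure for a fitted probe is given below. It is illustrative only: `log_prob` is a hypothetical stand-in for the probe's conditional log-likelihood log p(y | θᵀx), and the alignment of original and perturbed datapoints is assumed to be handled upstream.

```python
import numpy as np

def inference_impairment(log_prob, theta, X_orig, X_pert, y):
    """Average drop in probe log-likelihood caused by a perturbation.

    log_prob(theta, x, y) -> log p(y | theta^T x) for a single datapoint (assumed helper);
    X_orig, X_pert: (N, d) probe inputs for the original / perturbed sentences,
    aligned so that row i is the same datum in both; y: (N,) labels.
    """
    deltas = [log_prob(theta, xo, yi) - log_prob(theta, xp, yi)
              for xo, xp, yi in zip(X_orig, X_pert, y)]
    return float(np.mean(deltas))
```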
|
{ |
|
"text": "Based on the three linguistic tasks, the distortion we measure from the adjacent word swaps is more strongly related to more complex information. The inverted L shape of Fig. 4a suggests that increasing distortion is only weakly related to impairment of POS inference, which is perhaps unsurprising given that POS tags can be readily predicted from Figure 4 : Distortion and inference impairment for increasing linguistic complexity. In each plot, a point is the average (distortion, 'impairment') for a given layer and a given class of word swap distance. Points are connected by lines according to their swap type (i.e. tree distance). The circles are colored according to layer (see right for a legend). Averages are taken over 600 test sentences, with one of each swap type per sentence, and both distortion and log-likelihood are computed for every word in the sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 177, |
|
"text": "Fig. 4a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 357, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "local context. A deeper syntactic probe, the GP classifier (4b), does show a consistent positive relationship, but only for swaps which break a phrase boundary (i.e. distance >2). Meanwhile, impairment of the distance probe (4c), which reconstructs the full parse tree, has a consistently positive relationship with distortion, whose slope is proportionate to the tree distance of the swap. Thus, when specifically perturbing the phrasal boundary, the representational distortion is related to relatively more complex linguistic information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity depends on syntactic distance of the perturbation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In the transformer architecture, contexts are built with the attention mechanism. Recall that attention is a mechanism for allowing input vectors to interact when forming the output, and the ultimate output for a given token is a convex combination of the features of all tokens (Eq. 1). It has been shown qualitatively that, within a layer, BERT allocates attention preferentially to words in the same phrase , so if our perturbations affect inference of phrase structure then the changes in attentions could explain our results. Note that it is not guaranteed to do so: the BERT features in a given layer are a function of the attentions and the \"values\" (each token's feature vector), and both are affected by our perturbations. Therefore our last set of experiments asks whether attention alone can explain the sensitivity to syntactic distance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbations is mediated by changes in attention", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "To quantify the change in attention weights across the whole sentence, we compute the distance between each token's attention weights in the perturbed and unperturbed sentences, and average across all tokens. For token i, its vector of attention weights in response to the unperturbed sentence is a i , and for the perturbed one\u00e3 i (such that j a i j = 1). Since each set of attention weights are non-negative and sum to 1 due to softmax, we use the relative entropy 10 as a distance measure. This results in the total change in attention being:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbations is mediated by changes in attention", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2206a = 1 T T i=1 T j=1 a i j log a i j a i j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbations is mediated by changes in attention", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "which is non-negative and respects the structure of the weights. We confirmed that other measures (like the cosine similarity) produce results that are qualitatively similar. First, we observe that the changes in the attention depend on the layer hierarchy when adjacent word swaps break the phrase boundary. Like the distortion, attention changes little or not at all in the early layers, and progressively more in the final layers ( Fig. 5b) . Furthermore, these changes are also positively correlated with syntactic distance in most cases, which suggests that representation's sensitivities to syntactic tree distance may primarily be due to changes in attention.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 435, |
|
"end": 443, |
|
"text": "Fig. 5b)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbations is mediated by changes in attention", |
|
"sec_num": "4.3" |
|
}, |
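
A minimal sketch of the attention-change measure Δa (the mean relative entropy between each token's attention distributions before and after the perturbation) follows. It assumes the attention rows have already been re-aligned to the original word order; it is an illustration, not the paper's implementation.

```python
import numpy as np

def attention_change(A_orig, A_pert, eps=1e-12):
    """Mean KL divergence between each token's attention weights.

    A_orig, A_pert: (T, T) row-stochastic attention matrices for the original
    and perturbed sentence (rows aligned to the original token order).
    """
    A_orig = np.clip(A_orig, eps, None)   # avoid log(0)
    A_pert = np.clip(A_pert, eps, None)
    kl_per_token = np.sum(A_orig * np.log(A_orig / A_pert), axis=-1)
    return kl_per_token.mean()
```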
|
{ |
|
"text": "To see whether the changes in attention can in fact explain representational sensitivity to syntactic distance, we turned to the same partial linear regression model as before (A.4) to compute the the correlation between the representation's distortion and the tree distance between the swapped adjacent words, after controlling for changes in attention (\u2206a). The correlations substantially reduced in the controlled case (Fig. 5c ), which suggests that attention weights contribute to the representational sensitivity to syntactic tree distance; but the correlations are not eliminated, which suggests that distortions from the previous layer also contribute. The head/layer-wise rank correlations (\u00b195% confidence intervals) between distortion and tree distance after controlling for changes in attention, plotted against the uncontrolled rank correlations. Being below the diagonal indicates that the relationship between distortion and tree distance is partially explained by \u2206a.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 422, |
|
"end": 430, |
|
"text": "(Fig. 5c", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity to perturbations is mediated by changes in attention", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In this paper, we used the representational sensitivity to controlled input perturbation as a probe of hierarchical phrasal structure in deep linguistic representations. The logic of our probe is the representations which respect phrase structure should less sensitive to perturbations which preserve the phrasal unit, and more sensitive to those which disrupt a phrase. We hope that our results demonstrate the versatility and utility of perturbation-based approaches to studying deep language models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We showed that BERT and its variants build representations which are sensitive to the phrasal unit, as demonstrated by greater invariance to perturbations preserving phrasal boundaries compared to control perturbations which break the phrasal boundaries (Fig. 2-5 ). We also find that while the representational sensitivity to broken phrase boundaries grows across layers, this increase in sensitivity is more prominent when the breakage occurs between two words that are syntactically distant (i.e., when the broken phrase is more complex). Using the same methods to show that changes in attention provide a partial explanation for perturbationinduced distortions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 263, |
|
"text": "(Fig. 2-5", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "While our distortion metric is a task-agnostic measure of change in the neural population activity, this may or may not reflect changes in the encoding of specific linguistic information. To relate our metric with specific kinds of information, we measured the change in the performance of supervised linear probes trained on top of the representation (Fig. 4) . The probe sensitivity measure also bears a suggestive resemblance to the saliency map analysis (Simonyan et al., 2014) in machine learning, which is used to highlight the most output-sensitive regions within the input. To draw an analogy with that work, one way of characterizing our results is that phrasal boundaries are regions of high saliency in hidden representations and that, in deep layers, complex phrase boundaries are more salient than simple phrase boundaries. Further exploring the use of supervised probes and our input perturbations as a tool for layerwise probing of syntactic saliency is a promising direction for future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 458, |
|
"end": 481, |
|
"text": "(Simonyan et al., 2014)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 352, |
|
"end": 360, |
|
"text": "(Fig. 4)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Finally, several studies (Sinha et al., 2021; Gupta et al., 2021; Pham et al., 2020) , have recently found that masked language models pretrained or finetuned on sentences that break natural word order (e.g. via n-gram shuffling) still perform quite well across various tasks, even on supervised probes of syntactic phenomena. It would be interesting to apply our perturbative analyses on such models to see if they exhibit less sensitivity to the experimental vs. control setups (e.g. n-gram vs. phrase swaps). This may indicate that such models do not capture representational correlates of phrase structure in their representations despite their good performance on supervised probing tasks. In such a case, what tasks would actually require the \"linguistic knowledge\" that we are probing for? In similar vein, applying our perturbative analyses on models that explicitly incorporate syntax into their representations (Sundararaman et al., 2019; Zanzotto et al., 2020; Kuncoro et al., 2020) might provide further insights.", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 45, |
|
"text": "(Sinha et al., 2021;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 46, |
|
"end": 65, |
|
"text": "Gupta et al., 2021;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 66, |
|
"end": 84, |
|
"text": "Pham et al., 2020)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 921, |
|
"end": 948, |
|
"text": "(Sundararaman et al., 2019;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 949, |
|
"end": 971, |
|
"text": "Zanzotto et al., 2020;", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 972, |
|
"end": 993, |
|
"text": "Kuncoro et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our method and results suggest many interesting future directions. We hope that this work will motivate: (1) a formal theory of efficient hierarchical data representations in distributed features; (2) a search for the causal connection between attention structure, the representational geometry, and the model performance; (3) potential applications in network pruning studies; (4) an extension of the current work as a hypothesis generator in neuroscience to understand how neural populations implement tasks with an underlying compositional structure. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In addition to the scaled Frobenius distance, we also considered other ways of measuring distortion in the representation. We will briefly report results for two other metrics, and describe them here.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Additional metrics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "CCA Canonical correlations analysis (CCA) (Raghu et al., 2017) measures the similarity of two sets of variables using many samples from each. Given two sets of random variables x = (x 1 , x 2 , ..., x n ) and y = (y 1 , y 2 , ..., y m ), CCA finds linear weights a \u2208 R n and b \u2208 R m which maximise cov(a \u2022 x, b \u2022 y). In our context, we treat the representation of the original sentence as x, and the representation of the perturbed sentence as y, and the resulting correlation as a similarity measure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 62, |
|
"text": "(Raghu et al., 2017)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Additional metrics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since CCA requires many samples, we use the set of all word-level representations across all perturbed sentences. For example, to construct the samples of x from S perturbed sentences, we get use", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Additional metrics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "[X 1 |X 2 |...|X S ], where each X i \u2208 R 768\u00d7T i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Additional metrics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Unless specified otherwise, S = 400. For good estimates, CCA requires many samples (on the order of at least the number of dimensions), and we facilitate this by first reducing the dimension of the matrices using PCA. Using 400 components preserves \u223c 90% of the variance. Thus, while CCA gives a good principled measure of representational similarity, its hunger for samples makes it unsuitable as a per-sentence metric.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Additional metrics", |
|
"sec_num": null |
|
}, |
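
For reference, canonical correlations can be computed with a standard QR-plus-SVD recipe; the sketch below is a generic implementation (not the paper's code) that returns the mean canonical correlation between two centered sample matrices. The PCA pre-reduction described above would be applied before calling it.

```python
import numpy as np

def mean_cca(X, Y):
    """Mean canonical correlation between X (n, p) and Y (n, q), n samples each.

    The canonical correlations are the singular values of Qx^T Qy, where Qx and Qy
    are orthonormal bases for the centered column spaces of X and Y.
    """
    Xc = X - X.mean(axis=0)
    Yc = Y - Y.mean(axis=0)
    Qx, _ = np.linalg.qr(Xc)
    Qy, _ = np.linalg.qr(Yc)
    corrs = np.linalg.svd(Qx.T @ Qy, compute_uv=False)
    return corrs.mean()
```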
|
{ |
|
"text": "We also measured distortion using Projec- tion Weighted Canonical Correlation Analysis (PWCCA), an improved version of CCA to estimate the true correlation between tensors (Morcos et al., 2018) . 12 As reported in Figure 7 , we did not find any qualitative differences between PWCCA and CCA in our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 193, |
|
"text": "(Morcos et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 198, |
|
"text": "12", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 222, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.2 Additional metrics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Cosine A similarity measure defined on individual sentences is the cosine between the sentencelevel representations. By sentence-level representation, we mean the concatenation of the wordlevel vectors into a single vector s \u2208 R N T (where N is the dimension of each feature vector). Treating each dimension of the vector as a sample, we can then define the following metric: corr s original i , s swapped i . This is equivalent to computing the cosine of the vectors after subtracting the (scalar) mean across dimensions, hence we will refer to it as 'cosine'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Additional metrics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we describe additional details of the manipulations done on the datasets. n-gram shuffling For a given a sentence, we split it into sequential non-overlapping n-gram's from left to right; if the length of the sentence is not a multiple of n, the remaining words form an additional m-gram, m < n. The list of the n-gram's is randomly shuffled. Note that the 1-gram case is equivalent to a random shuffling of the words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Additional details on the dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our analysis, we consider n-grams, with n varying from 1 (i.e., individual words) to 7 and all the sentences have at least 10 words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Additional details on the dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We provide here an example of n-gram shuffling.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Additional details on the dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Original: The market 's pessimism reflects the gloomy outlook in Detroit Phrase swaps Using constituency trees from the Penn Treebank (Marcus et al., 1994) , we define phrases as constituents which don't contain any others within them. (See Fig. 2c or Fig. 3a in the main text.) Phrase swaps thus consist of swapping one phrase with another, and leaving other words intact.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 157, |
|
"text": "(Marcus et al., 1994)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 250, |
|
"text": "Fig. 2c", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 261, |
|
"text": "Fig. 3a", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.3 Additional details on the dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To provide an appropriate control perturbation, we swap two disjoint n-grams, which are the same size as true phrases but cross phrase boundaries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Additional details on the dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Adjacent word swaps To better isolate the effect of broken phrase boundaries, we used adjacent word swaps. Adjacent words were chosen randomly, and one swap was performed per sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Additional details on the dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to control for uninteresting explanations of our results, we often make use of a simple method for regressing out confounds. Generally, we want to assess the linear relationship between X and Y , when accounting for the (potentially non-linear) effect of another variable Z. In our experiments, X is always the swap-induced distortion and Y is the swap type, like integer-valued tree distance or binary-valued in/out phrase. We wish to allow E[Y |Z] and E[X|Z] to be any smooth function of Z, which is achieved by the least-squares solution to the following partially linear model:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 Partial linear regression", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Y \u223c \u03b2 x X + \u03b2 z \u2022 f (Z)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 Partial linear regression", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where f (z) is a vector of several (we use 10) basis functions (we used cubic splines with knots at 10 quantiles) of Z. Both regressions have the same optimal \u03b2 x , but the one on the left is computationally simpler (Hansen, 2000) . The standard confidence intervals on \u03b2 x apply.", |
|
"cite_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 230, |
|
"text": "(Hansen, 2000)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 Partial linear regression", |
|
"sec_num": null |
|
}, |
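The sketch below illustrates one way to fit this partially linear model with numpy and statsmodels; the truncated-power spline basis is a stand-in for whatever spline parameterization was actually used, and the function names are ours. Rank-transforming x and y before calling it gives the conditional rank correlation described next.

```python
import numpy as np
import statsmodels.api as sm

def spline_basis(z, n_knots=10):
    """Truncated-power cubic spline basis for z, with knots at quantiles of z."""
    knots = np.quantile(z, np.linspace(0.05, 0.95, n_knots))
    cols = [z, z ** 2, z ** 3] + [np.clip(z - k, 0.0, None) ** 3 for k in knots]
    return np.column_stack(cols)

def partial_linear_beta(x, y, z):
    """Fit y ~ beta_x * x + beta_z . f(z) by least squares and return beta_x
    with its 95% confidence interval."""
    design = sm.add_constant(np.column_stack([x, spline_basis(z)]))
    fit = sm.OLS(y, design).fit()
    return fit.params[1], fit.conf_int()[1]  # coefficient on x and its CI

# Toy usage: x = swap-induced distortion, y = swap type, z = confound (e.g. PMI).
rng = np.random.default_rng(0)
z = rng.normal(size=500)
x = np.sin(z) + 0.5 * rng.normal(size=500)
y = 0.8 * x + z ** 2 + rng.normal(size=500)
print(partial_linear_beta(x, y, z))
```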
|
{ |
|
"text": "Intuitively, the \u03b2 x obtained by the partially linear regression above is related to the conditional correlation of X and Y given Z: \u03c1(X, Y |Z). Like an unconditonal correlation, it will be zero if X and Y are conditionally independent given Z, but not necessarily vice versa (both X and Y must be Gaussian for the other direction to be true). To compute conditional rank correlations (which assess a monotonic relationship between X and Y ), we rank-transform X and Y (this changes the confidence interval calculations).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 Partial linear regression", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We apply this method to swap size in Fig. 2 and attentions in Fig. 5 . In these supplemental materials, we will also report the results when X is the binary in/out phrase variable, and Z is PMI. The full p-values and coefficients of the uncontrolled and controlled regressions can be found in Table 1 , where we observe that past layer 2, the p-value on phrase boundary is very significant (p < 10 \u221212 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 43, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 62, |
|
"end": 68, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 300, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.4 Partial linear regression", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we describe the experiments based on the three linguistic tasks: parts of Speech (POS); grandparent tags (GP); and constituency tree distance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.5 Supervised probes", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The POS and GP classifiers were multinomial logistic regressions trained to classify each word's POS tag (e.g. 'NNP', 'VB') and the tag of its grandparent in the constituency tree, respectively. If a word has no grandparent, its label is the root token 'S'. The probes were optimized with standard stochastic gradient descent, 50 sentences from the PTB per mini-batch. 10 epochs, at 10 \u22123 learning rate, were sufficient to reach convergence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.5 Supervised probes", |
|
"sec_num": null |
|
}, |
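For concreteness, here is a minimal PyTorch sketch of such a classifier probe; the data-handling details (how word vectors and integer tags are batched) are our own assumptions, while the optimizer and hyperparameters follow the text.

```python
import torch
import torch.nn as nn

def train_tag_probe(word_vecs, tags, num_tags, dim=768,
                    epochs=10, lr=1e-3, sents_per_batch=50):
    """Multinomial logistic regression from word vectors to POS or grandparent
    tags. `word_vecs` is a list of (num_words, dim) tensors (one per sentence)
    and `tags` a parallel list of integer label tensors."""
    probe = nn.Linear(dim, num_tags)
    opt = torch.optim.SGD(probe.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    for _ in range(epochs):
        for s in range(0, len(word_vecs), sents_per_batch):
            xs = torch.cat(word_vecs[s:s + sents_per_batch])
            ys = torch.cat(tags[s:s + sents_per_batch])
            opt.zero_grad()
            loss_fn(probe(xs), ys).backward()
            opt.step()
    return probe
```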
|
{ |
|
"text": "The distance probe is a linear map B applied to each word-vector w in the sentence, and trained such that, for all word pairs i, j, TreeDist(i, j) matches B(w i \u2212 w j ) 2 2 as closely as possible. Unlike the classifiers, there is freedom in the out-put dimension of B; we used 100, although performance and results are empirically the same for any choice greater than \u223c 64. Our probes are different from (Hewitt and Manning, 2019) in two ways: (1) we use constituency trees, instead of dependency trees, and (2) instead of an L1 loss function, we use the Poisson (negative) log-likelihood as the loss function. That is, if \u03bb i,j = B(w i \u2212 w j ) 2 2 , and y i,j = TreeDist(i, j) \u2212l i,j = y i,j log \u03bb i,j \u2212 \u03bb i,j \u2212 log y i,j ! Otherwise, the probes are trained exactly as in (Hewitt and Manning, 2019). Specifically, we used standard SGD with 20 sentences from the PTB in each mini-batch, for 40 epochs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 404, |
|
"end": 430, |
|
"text": "(Hewitt and Manning, 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.5 Supervised probes", |
|
"sec_num": null |
|
}, |
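A minimal PyTorch sketch of the distance probe and its Poisson loss follows; torch.nn.PoissonNLLLoss (with log_input=False and full=True, which adds a Stirling approximation of the log y! term) stands in for the exact loss implementation, and the training-step details shown are illustrative only.

```python
import torch
import torch.nn as nn

class DistanceProbe(nn.Module):
    """Linear map B with lambda_{ij} = ||B(w_i - w_j)||^2 predicting the
    constituency tree distance between words i and j."""
    def __init__(self, dim=768, rank=100):
        super().__init__()
        self.B = nn.Linear(dim, rank, bias=False)

    def forward(self, words):                              # (num_words, dim)
        diffs = words.unsqueeze(0) - words.unsqueeze(1)    # (n, n, dim)
        return self.B(diffs).pow(2).sum(-1)                # (n, n) Poisson rates

probe = DistanceProbe()
loss_fn = nn.PoissonNLLLoss(log_input=False, full=True)   # lambda - y*log(lambda) + log(y!)
opt = torch.optim.SGD(probe.parameters(), lr=1e-3)

# One illustrative step on a random 12-word "sentence" with fake tree distances.
words = torch.randn(12, 768)
tree_dist = torch.randint(1, 8, (12, 12)).float()
opt.zero_grad()
loss_fn(probe(words), tree_dist).backward()
opt.step()
```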
|
{ |
|
"text": "Evaluation A linear model is fit to maximize p(y|\u03b8(x)), with p a probability function (multinomial for classifiers, Poisson for distance), and x coming from the unperturbed transformer representation. We evaluate the model onx, which are the representations of the data when generated from a perturbed sentence. We take the average of log p(y|\u03b8(x i )) \u2212 log p(y|\u03b8(x i )) over all the data i in all sentences. For example, all words for the classifiers, and all pairs of words for the distance probe. Concretely, we are just measuring the difference in validation loss of the same probe on the x data and thex data. But because the loss is an appropriate probability function, we can interpret the same quantity as a difference in log-likelihood between the distribution conditioned on the regular representation and that conditioned on the perturbed representation. Distortion is similarly computed using the full sentence, providing a number for each swap in each sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.5 Supervised probes", |
|
"sec_num": null |
|
}, |
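For the classifier probes, this evaluation amounts to the following sketch; the Poisson case for the distance probe is analogous, and the function name is ours.

```python
import torch
import torch.nn.functional as F

def loglik_drop(probe, clean_vecs, perturbed_vecs, labels):
    """Average of log p(y | probe(x_i)) - log p(y | probe(x~_i)) over items,
    i.e. the drop in log-likelihood when a probe trained on clean
    representations is evaluated on perturbed ones. Rows of the two tensors
    must be aligned item-by-item."""
    with torch.no_grad():
        ll_clean = -F.cross_entropy(probe(clean_vecs), labels, reduction="none")
        ll_pert = -F.cross_entropy(probe(perturbed_vecs), labels, reduction="none")
    return (ll_clean - ll_pert).mean().item()
```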
|
{ |
|
"text": "We use constituency parse trees from the English Penn Treebank(Marcus et al., 1994).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "However, the exact specification for the MLP and X0 may vary across different pretrained models.5 BERT uses BPE tokenization(Sennrich et al., 2015), which means that some words are split into multiple tokens. Since we wish to evaluate representations at word-level, if a word is split into multiple tokens, its word representation is computed as the average of all its token representations.6 There are many possible ways of measuring distortion, induced by different norms. We observed the results to be qualitatively similar for different measures, and hence we focus on the Frobenius norm in our main results. We show the results from additional distortion metrics in the A.2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
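The two footnotes above correspond to operations like the following sketch: averaging subword-token vectors into word vectors, and computing a Frobenius-norm distortion between matched word representations. Any z-scoring or per-word normalization described in the main text is omitted here, and the helper names are ours.

```python
import torch

def pool_word_vectors(token_vecs, tokens_per_word):
    """Average contiguous BPE-token vectors into word-level vectors.
    `token_vecs` is (num_tokens, dim); `tokens_per_word` gives how many
    subword tokens each word was split into, in order."""
    out, i = [], 0
    for n in tokens_per_word:
        out.append(token_vecs[i:i + n].mean(dim=0))
        i += n
    return torch.stack(out)

def frobenius_distortion(orig, perturbed):
    """Frobenius norm of the difference between two (num_words, dim)
    representations whose rows are aligned word-by-word (perturbed words
    mapped back to their original positions)."""
    return torch.linalg.norm(orig - perturbed)
```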
|
{ |
|
"text": "Note that for adjacent words, the number of broken phrase boundaries equals the tree distance minus two.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "While the original paper predicted dependency tree distance, in this paper we instead predict the constituency tree distance.9 POS-and GP-tag prediction outputs a sequence of labels for each sentence, while the distance probe outputs the constituency tree distance between each pair of words. Then log p(y|\u03b8 T xi) is simply the log probability of an individual label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Also called the KL divergence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the implementation from https://github. com/huggingface/transformers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For both CCA and PWCCA, we use the implementation from https://github.com/google/svcca.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Here we go into further detail on our methods and data to aid in reproducibility.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here we give the details for all models considered in this paper. The majority of results are from BERT, but we also tested other variants. 11 12-layer, 768hidden, 12-heads, 110M parameters.Note that the hidden size is 768 across all the models. For each pre-trained model, input text is tokenized using its default tokenizer and features are extracted at token level.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Model details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "With PMI ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Without PMI", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Good-Enough Compositional Data Augmentation", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Andreas. 2020. Good-Enough Compositional Data Augmentation. In Proceedings ACL.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Unsupervised neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018. Unsupervised neural ma- chine translation. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Synthetic and natural noise both break neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Belinkov and Yonatan Bisk. 2018. Synthetic and natural noise both break neural machine transla- tion. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Analyzing hidden representations in end-to-end automatic speech recognition systems", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2441--2451", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Belinkov and James Glass. 2017. Analyz- ing hidden representations in end-to-end automatic speech recognition systems. In Advances in Neural Information Processing Systems, pages 2441-2451.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Classification and geometry of general perceptual manifolds", |
|
"authors": [ |
|
{ |
|
"first": "Sueyeon", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Daniel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haim", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sompolinsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Physical Review X", |
|
"volume": "8", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "SueYeon Chung, Daniel D Lee, and Haim Sompolin- sky. 2018. Classification and geometry of gen- eral perceptual manifolds. Physical Review X, 8(3):031003.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Separability and geometry of object manifolds in deep neural networks. bioRxiv", |
|
"authors": [ |
|
{ |
|
"first": "Uri", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sueyeon", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Daniel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haim", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sompolinsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Uri Cohen, SueYeon Chung, Daniel D Lee, and Haim Sompolinsky. 2019. Separability and geometry of object manifolds in deep neural networks. bioRxiv, page 644658.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Untangling invariant object recognition", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David D", |
|
"middle": [], |
|
"last": "Dicarlo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cox", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Trends in cognitive sciences", |
|
"volume": "11", |
|
"issue": "8", |
|
"pages": "333--341", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James J DiCarlo and David D Cox. 2007. Untangling invariant object recognition. Trends in cognitive sci- ences, 11(8):333-341.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Cortical tracking of hierarchical linguistic structures in connected speech", |
|
"authors": [ |
|
{ |
|
"first": "Nai", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Melloni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Poeppel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Nature neuroscience", |
|
"volume": "19", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nai Ding, Lucia Melloni, Hang Zhang, Xing Tian, and David Poeppel. 2016. Cortical tracking of hierarchi- cal linguistic structures in connected speech. Nature neuroscience, 19(1):158.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Distributed representations, simple recurrent networks, and grammatical structure", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jeffrey L Elman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Machine learning", |
|
"volume": "7", |
|
"issue": "2-3", |
|
"pages": "195--225", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey L Elman. 1991. Distributed representations, simple recurrent networks, and grammatical struc- ture. Machine learning, 7(2-3):195-225.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "What bert is not: Lessons from a new suite of psycholinguistic diagnostics for language models", |
|
"authors": [ |
|
{ |
|
"first": "Allyson", |
|
"middle": [], |
|
"last": "Ettinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "34--48", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00298" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allyson Ettinger. 2020. What bert is not: Lessons from a new suite of psycholinguistic diagnostics for lan- guage models. Transactions of the Association for Computational Linguistics, 8:34-48.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "RNNs as psycholinguistic subjects: Syntactic state and grammatical dependency", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Futrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ethan", |
|
"middle": [], |
|
"last": "Wilcox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takashi", |
|
"middle": [], |
|
"last": "Morit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1809.01329" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Futrell, Ethan Wilcox, Takashi Morit, and Roger Levy. 2018. RNNs as psycholinguistic sub- jects: Syntactic state and grammatical dependency. arXiv:1809.01329.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Neural language models as psycholinguistic subjects: Representations of syntactic state", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Futrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ethan", |
|
"middle": [], |
|
"last": "Wilcox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takashi", |
|
"middle": [], |
|
"last": "Morita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "32--42", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1004" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Futrell, Ethan Wilcox, Takashi Morita, Peng Qian, Miguel Ballesteros, and Roger Levy. 2019. Neural language models as psycholinguistic sub- jects: Representations of syntactic state. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 32-42, Minneapolis, Minnesota. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Visualizing the phate of neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Gigante", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Adam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Smita", |
|
"middle": [], |
|
"last": "Charles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gal", |
|
"middle": [], |
|
"last": "Krishnaswamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mishne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1840--1851", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott Gigante, Adam S Charles, Smita Krishnaswamy, and Gal Mishne. 2019. Visualizing the phate of neu- ral networks. In Advances in Neural Information Processing Systems, pages 1840-1851.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Assessing bert's syntactic abilities", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.05287" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg. 2019. Assessing bert's syntactic abili- ties. arXiv preprint arXiv:1901.05287.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Bert family eat word salad: Experiments with text understanding", |
|
"authors": [ |
|
{ |
|
"first": "Ashim", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giorgi", |
|
"middle": [], |
|
"last": "Kvernadze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Srikumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashim Gupta, Giorgi Kvernadze, and Vivek Srikumar. 2021. Bert family eat word salad: Experiments with text understanding. AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Designing and Interpreting Probes with Control Tasks", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Hewitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Hewitt and Percy Liang. 2019. Designing and Interpreting Probes with Control Tasks. In Proceed- ings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A structural probe for finding syntax in word representations", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Hewitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4129--4138", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Hewitt and Christopher D Manning. 2019. A structural probe for finding syntax in word represen- tations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4129-4138.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Learning distributed representations of sentences from unlabelled data", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1367--1377", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016. Learning distributed representations of sen- tences from unlabelled data. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 1367-1377, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Adversarial examples for evaluating reading comprehension systems", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia and Percy Liang. 2017. Adversarial exam- ples for evaluating reading comprehension systems. In Proceedings of EMNLP, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The discovery of structural form", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Kemp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Tenenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the National Academy of Sciences", |
|
"volume": "105", |
|
"issue": "", |
|
"pages": "10687--10692", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles Kemp and Joshua B Tenenbaum. 2008. The discovery of structural form. Proceedings of the Na- tional Academy of Sciences, 105(31):10687-10692.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Are Pre-trained Language Models Aware of Phrases? Simple but Strong Baselines for Grammar Induction", |
|
"authors": [ |
|
{ |
|
"first": "Taeuk", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jihun", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Edmiston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sang", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taeuk Kim, Jihun Choi, Daniel Edmiston, and Sang goo Lee. 2020. Are Pre-trained Language Models Aware of Phrases? Simple but Strong Baselines for Grammar Induction. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Syntactic Structure Distillation Pretraining for Bidirectional Encoders", |
|
"authors": [ |
|
{ |
|
"first": "Adhiguna", |
|
"middle": [], |
|
"last": "Kuncoro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingpeng", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Fried", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dani", |
|
"middle": [], |
|
"last": "Yogatama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Rimell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "776--794", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adhiguna Kuncoro, Lingpeng Kong, Daniel Fried, Dani Yogatama, Laura Rimell, Chris Dyer, and Phil Blunsom. 2020. Syntactic Structure Distillation Pre- training for Bidirectional Encoders. Transactions of the Association for Computational Linguistics, 8:776-794.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Unsupervised machine translation using monolingual corpora only", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018. Unsupervised ma- chine translation using monolingual corpora only. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Albert: A lite bert for self-supervised learning of language representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learn- ing of language representations.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Linguistic knowledge and transferability of contextual representations", |
|
"authors": [ |
|
{ |
|
"first": "Nelson", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nelson F. Liu, Matt Gardner, Yonatan Belinkov, Matthew E. Peters, and Noah A. Smith. 2019a. Lin- guistic knowledge and transferability of contextual representations. CoRR, abs/1903.08855.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Emergence of separable manifolds in deep language representations", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Mamou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel A Del", |
|
"middle": [], |
|
"last": "Rio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cory", |
|
"middle": [], |
|
"last": "Stephenson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanlin", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sueyeon", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.01095" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Mamou, Hang Le, Miguel A Del Rio, Cory Stephenson, Hanlin Tang, Yoon Kim, and SueYeon Chung. 2020. Emergence of separable manifolds in deep language representations. arXiv preprint arXiv:2006.01095.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "The penn treebank: Annotating predicate argument structure", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grace", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Ann" |
|
], |
|
"last": "Marcinkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Macintyre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Bies", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Ferguson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Katz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Britta", |
|
"middle": [], |
|
"last": "Schasberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proceedings of the Workshop on Human Language Technology, HLT '94", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "114--119", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1075812.1075835" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell Marcus, Grace Kim, Mary Ann Marcinkiewicz, Robert MacIntyre, Ann Bies, Mark Ferguson, Karen Katz, and Britta Schasberger. 1994. The penn treebank: Annotating predicate argument structure. In Proceedings of the Workshop on Human Language Technology, HLT '94, pages 114-119, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Composition is the core driver of the language-selective network", |
|
"authors": [ |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Mollica", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Siegelman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniia", |
|
"middle": [], |
|
"last": "Diachek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Steven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Piantadosi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Mineroff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hope", |
|
"middle": [], |
|
"last": "Futrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Kean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evelina", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fedorenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Neurobiology of Language", |
|
"volume": "1", |
|
"issue": "1", |
|
"pages": "104--134", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francis Mollica, Matthew Siegelman, Evgeniia Di- achek, Steven T Piantadosi, Zachary Mineroff, Richard Futrell, Hope Kean, Peng Qian, and Evelina Fedorenko. 2020. Composition is the core driver of the language-selective network. Neurobiology of Language, 1(1):104-134.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Insights on representational similarity in neural networks with canonical correlation", |
|
"authors": [ |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Morcos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maithra", |
|
"middle": [], |
|
"last": "Raghu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems 31", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5732--5741", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ari Morcos, Maithra Raghu, and Samy Bengio. 2018. Insights on representational similarity in neural net- works with canonical correlation. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa- Bianchi, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 31, pages 5732- 5741. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word rep- resentation. In Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Out of order: How important is the sequential order of words in a sentence in natural language understanding tasks?", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Thang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trung", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anh", |
|
"middle": [], |
|
"last": "Mai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang M. Pham, Trung Bui, Long Mai, and Anh Nguyen. 2020. Out of order: How important is the sequential order of words in a sentence in natural language understanding tasks?", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Svcca: Singular vector canonical correlation analysis for deep learning dynamics and interpretability", |
|
"authors": [ |
|
{ |
|
"first": "Maithra", |
|
"middle": [], |
|
"last": "Raghu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Gilmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Yosinski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jascha", |
|
"middle": [], |
|
"last": "Sohl-Dickstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "6076--6085", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maithra Raghu, Justin Gilmer, Jason Yosinski, and Jascha Sohl-Dickstein. 2017. Svcca: Singular vec- tor canonical correlation analysis for deep learning dynamics and interpretability. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vish- wanathan, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 30, pages 6076- 6085. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Visualizing and Measuring the Geometry of BERT", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Reif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wattenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernanda", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Viegas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Coenen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Pearce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8592--8600", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Reif, Ann Yuan, Martin Wattenberg, Fernanda B Viegas, Andy Coenen, Adam Pearce, and Been Kim. 2019. Visualizing and Measuring the Geometry of BERT. In Advances in Neural Information Process- ing Systems, pages 8592-8600.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Hierarchical reasoning by neural circuits in the frontal cortex", |
|
"authors": [ |
|
{ |
|
"first": "Morteza", |
|
"middle": [], |
|
"last": "Sarafyazd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mehrdad", |
|
"middle": [], |
|
"last": "Jazayeri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Science", |
|
"volume": "364", |
|
"issue": "6441", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Morteza Sarafyazd and Mehrdad Jazayeri. 2019. Hi- erarchical reasoning by neural circuits in the frontal cortex. Science, 364(6441):eaav8911.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Learning hierarchical categories in deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Andrew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Saxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Surya", |
|
"middle": [], |
|
"last": "Mcclellans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ganguli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Annual Meeting of the Cognitive Science Society", |
|
"volume": "35", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew M Saxe, James L McClellans, and Surya Gan- guli. 2013. Learning hierarchical categories in deep neural networks. In Proceedings of the Annual Meet- ing of the Cognitive Science Society, volume 35.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.07909" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Deep inside convolutional networks: Visualising image classification models and saliency maps", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Vedaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan, Andrea Vedaldi, and Andrew Zisser- man. 2014. Deep inside convolutional networks: Vi- sualising image classification models and saliency maps.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Masked language modeling and the distributional hypothesis: Order word matters pre-training for little", |
|
"authors": [ |
|
{ |
|
"first": "Koustuv", |
|
"middle": [], |
|
"last": "Sinha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dieuwke", |
|
"middle": [], |
|
"last": "Hupkes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joelle", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Koustuv Sinha, Robin Jia, Dieuwke Hupkes, Joelle Pineau, Adina Williams, and Douwe Kiela. 2021. Masked language modeling and the distributional hypothesis: Order word matters pre-training for lit- tle.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "The hippocampus as a predictive map", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Kimberly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Stachenfeld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Matthew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Botvinick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gershman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Nature neuroscience", |
|
"volume": "20", |
|
"issue": "11", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kimberly L Stachenfeld, Matthew M Botvinick, and Samuel J Gershman. 2017. The hippocampus as a predictive map. Nature neuroscience, 20(11):1643.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Syntax-Infused Transformer and BERT models for Machine Translation and Natural Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Dhanasekar", |
|
"middle": [], |
|
"last": "Sundararaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoyin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shijing", |
|
"middle": [], |
|
"last": "Si", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dinghan", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Carin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.06156" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dhanasekar Sundararaman, Vivek Subramanian, Guoyin Wang, Shijing Si, Dinghan Shen, Dong Wang, and Lawrence Carin. 2019. Syntax-Infused Transformer and BERT models for Machine Translation and Natural Language Understanding. arXiv:1911.06156.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "BERT Rediscovers the Classical NLP Pipeline", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019. BERT Rediscovers the Classical NLP Pipeline. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Toward the neural implementation of structure learning", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Gowanlock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Tervo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Tenenbaum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Gershman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Current opinion in neurobiology", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "99--105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D Gowanlock R Tervo, Joshua B Tenenbaum, and Samuel J Gershman. 2016. Toward the neural im- plementation of structure learning. Current opinion in neurobiology, 37:99-105.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Mechanisms of face perception", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Doris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Tsao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Livingstone", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Annu. Rev. Neurosci", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "411--437", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Doris Y Tsao and Margaret S Livingstone. 2008. Mechanisms of face perception. Annu. Rev. Neu- rosci., 31:411-437.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "StructBERT: Incorporating Language Structures into Pre-training for Deep Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Bi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zuyi", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiangnan", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liwei", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luo", |
|
"middle": [], |
|
"last": "Si", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Wang, Bin Bi, Ming Yan, Chen Wu, Zuyi Bao, Jiangnan Xia, Liwei Peng, and Luo Si. StructBERT: Incorporating Language Structures into Pre-training for Deep Language Understanding.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "SwitchOut: an efficient data augmentation algorithm for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Xinyi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinyi Wang, Hieu Pham, Zihang Dai, and Graham Neu- big. 2018. SwitchOut: an efficient data augmen- tation algorithm for neural machine translation. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Hierarchical representation in neural language models: Suppression and recovery of expectations", |
|
"authors": [ |
|
{ |
|
"first": "Ethan", |
|
"middle": [], |
|
"last": "Wilcox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Futrell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.04068" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ethan Wilcox, Roger Levy, and Richard Futrell. 2019. Hierarchical representation in neural language mod- els: Suppression and recovery of expectations. arXiv preprint arXiv:1906.04068.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. XLNet: Generalized Autoregressive Pretrain- ing for Language Understanding. In Proceedings of NeurIPS.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "KERMIT: Complementing transformer architectures with encoders of explicit syntactic interpretations", |
|
"authors": [ |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Massimo Zanzotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Santilli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [], |
|
"last": "Ranaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Onorati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierfrancesco", |
|
"middle": [], |
|
"last": "Tommasino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesca", |
|
"middle": [], |
|
"last": "Fallucchi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabio Massimo Zanzotto, Andrea Santilli, Leonardo Ranaldi, Dario Onorati, Pierfrancesco Tommasino, and Francesca Fallucchi. 2020. KERMIT: Comple- menting transformer architectures with encoders of explicit syntactic interpretations. In Proceedings of the 2020 Conference on Empirical Methods in Natu- ral Language Processing (EMNLP).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Do Transformers build complexity along their layers? (a) The representation of a word is a function of its context, and this cartoon illustrates an hypothesis that deeper representations use larger contexts. (b) An example parse tree, illustrating our notion of phrase complexity. (c) Cartoon of the distortion metric, where vectors are the z-scored feature vectors z, and color map vectors to words.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Swapping n-grams and phrases. (a) Examples of basic n-gram shuffles, where colors indicate the units of shuffling. (b) Distortion metric computed at each layer, conditioned on n-gram size. Error bars hereafter represent standard error across 400 examples. (c) An example parse tree, with phrase boundaries shown as grey brackets, and two loworder phrases marked; and examples of a phrasal and control swap, with colors corresponding to the phrases marked above. (d) Distortion, computed at each layer, using either the full sentence, the subsentence of unswapped words, or the subsentence of swapped words, conditioned on swap type. (e) Full-sentence distortion for VP and NP phrase swaps. (f) Partial linear regression coefficients (see A.4) for pre-trained and untrained BERT models after controlling for swap size.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "Syntactic distance affects representational distortion. (a) An example of adjacent swaps which do and do not cross a phrase boundary, with low-order phrases colored. Phrase boundaries are drawn in red. (b) Distortion in each layer, but conditioned on the tree distance. (c) For each head (column) of each layer (row), the (Spearman) rank correlation between distortion and tree distance of the swapped words. Colors are such that red is positive, blue negative. (d) Rank correlations between distortion (of the full representation) in the trained and untrained BERT models. (e) Histogram of PMI values, for pairs in the same phrase and not. (f) Similar to b, but averaging all out-of-phrase swaps, and separating pairs above ('high') or below ('low') the median PMI.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"text": "Attention changes explain part of the sensitivity to tree distance. (a) An example of the attention matrices for all heads in a single layer (layer 8), given the above sentence as input. Phrases in the sentence are drawn as blocks in the matrix. (b) The change in attention between the unperturbed and perturbed attention weights, averaged over all out-of-phrase swaps. Columns are sorted independently by their value. (c)", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"text": "Replicating the adjacent word swapping experiments using different transformer architectures. Lines are the mean Frobenius distance, and the shading is \u00b11 standard error of the mean.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"text": "Results from the pretrained BERT model using alternative distortion metrics, on the n-gram shuffling and phrase swap experiments.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF6": { |
|
"type_str": "figure", |
|
"text": "gram : market pessimism the 's Detroit in The gloomy reflects outlook \u2022 2-gram : 's pessimism in Detroit The market reflects the gloomy outlook \u2022 3-gram : The market 's gloomy outlook in pessimism reflects the Detroit \u2022 4-gram : in Detroit The market 's pessimism reflects the gloomy outlook \u2022 5-gram : the gloomy outlook in Detroit The market 's pessimism reflects \u2022 6-gram : outlook in Detroit The market 's pessimism reflects the gloomy \u2022 7-gram : in Detroit The market 's pessimism reflects the gloomy outlook", |
|
"num": null, |
|
"uris": null |
|
} |
|
} |
|
} |
|
} |