|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:34:41.590484Z" |
|
}, |
|
"title": "Layer-wise Guided Training for BERT: Learning Incrementally Refined Document Representations", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Manginas", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Athens University of Economics", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Athens University of Economics", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Prodromos", |
|
"middle": [], |
|
"last": "Malakasiotis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Athens University of Economics", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Although BERT is widely used by the NLP community, little is known about its inner workings. Several attempts have been made to shed light on certain aspects of BERT, often with contradicting conclusions. A much raised concern focuses on BERT's over-parameterization and under-utilization issues. To this end, we propose o novel approach to fine-tune BERT in a structured manner. Specifically, we focus on Large Scale Multilabel Text Classification (LMTC) where documents are assigned with one or more labels from a large predefined set of hierarchically organized labels. Our approach guides specific BERT layers to predict labels from specific hierarchy levels. Experimenting with two LMTC datasets we show that this structured fine-tuning approach not only yields better classification results but also leads to better parameter utilization.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Although BERT is widely used by the NLP community, little is known about its inner workings. Several attempts have been made to shed light on certain aspects of BERT, often with contradicting conclusions. A much raised concern focuses on BERT's over-parameterization and under-utilization issues. To this end, we propose o novel approach to fine-tune BERT in a structured manner. Specifically, we focus on Large Scale Multilabel Text Classification (LMTC) where documents are assigned with one or more labels from a large predefined set of hierarchically organized labels. Our approach guides specific BERT layers to predict labels from specific hierarchy levels. Experimenting with two LMTC datasets we show that this structured fine-tuning approach not only yields better classification results but also leads to better parameter utilization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Despite BERT's (Devlin et al., 2019) popularity and effectiveness, little is known about its inner workings. Several attempts have been made to demystify certain aspects of BERT (Rogers et al., 2020) , often leading to contradicting conclusions. For instance, Clark et al. (2019) argue that attention measures the importance of a particular word when computing the next level representation for this word. However, Kovaleva et al. (2019) showed that most attention heads contain trivial linguistic information and follow a vertical pattern (attention to [cls] , [sep] , and punctuation tokens), which could be related to under-utilization or overparameterization issues. Other studies attempted to link specific BERT heads with linguistically interpretable functions (Htut et al., 2019; Clark et al., 2019; Kovaleva et al., 2019; Voita et al., 2019; Hoover et al., 2020; Lin et al., 2019) , agreeing that no single head densely encodes enough relevant information but instead different linguistic features are learnt by different attention heads. We hypothesize that the aforementioned largely contributes to the lack of attention-based explainability of BERT. Another open topic is how the knowledge is distributed across BERT layers. Most studies agree that syntactic knowledge is gathered in the middle layers (Hewitt and Manning, 2019; Goldberg, 2019; Jawahar et al., 2019) , while the final layers are more task-specific. Most importantly, it seems that any semantic knowledge is spread across the model, explaining why non-trivial tasks are better solved at the higher layers (Tenney et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 36, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 199, |
|
"text": "(Rogers et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 279, |
|
"text": "Clark et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 437, |
|
"text": "Kovaleva et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 559, |
|
"text": "[cls]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 567, |
|
"text": "[sep]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 786, |
|
"text": "(Htut et al., 2019;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 806, |
|
"text": "Clark et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 807, |
|
"end": 829, |
|
"text": "Kovaleva et al., 2019;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 830, |
|
"end": 849, |
|
"text": "Voita et al., 2019;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 850, |
|
"end": 870, |
|
"text": "Hoover et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 871, |
|
"end": 888, |
|
"text": "Lin et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1155, |
|
"end": 1160, |
|
"text": "BERT.", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1313, |
|
"end": 1339, |
|
"text": "(Hewitt and Manning, 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1340, |
|
"end": 1355, |
|
"text": "Goldberg, 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1356, |
|
"end": 1377, |
|
"text": "Jawahar et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1582, |
|
"end": 1603, |
|
"text": "(Tenney et al., 2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Driven by the above discussion, we propose a novel fine-tuning approach where different parts of BERT are guided to directly solve increasingly challenging classification tasks following an underlying label hierarchy. Specifically, we focus on Large Scale Multilabel Text Classification (LMTC) where documents are assigned with one or more labels from a large predefined set. The labels are organized in a hierarchy from general to specific concepts. Our approach attempts to tie specific BERT layers with specific hierarchy levels. In effect, each of these layers is responsible for predicting the labels of the corresponding level. We experiment with two LMTC datasets (EURLEX57K, MIMIC-III) and several variations of structured BERT training. Our contributions are: (a) We propose a novel structured approach to fine-tune BERT where specific layers are tied to specific hierarchy levels; (b) We show that structured training yields better results than the baseline across all levels of the hierarchy, while also leading to better parameter utilization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 683, |
|
"end": 693, |
|
"text": "MIMIC-III)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "EURLEX57K (Chalkidis et al., 2019) contains 57k EU legal acts from EURLEX. 1 Each act is approx. 700 words long and is annotated with one or more concepts from EUROVOC 2 which contains 7,391 concepts organized in an 8-level hierarchy. We truncate the hierarchy to 6 levels by discarding the last 2 levels which contain 50 rarely used labels. 3 (Johnson et al., 2017) contains approx. 52k discharge summaries from US hospitals. Each summary is approx. 1.6k words long and is annotated with one or more ICD-9 4 codes. ICD-9 contains 22,395 codes organized in a 7-level hierarchy. We truncate the hierarchy to 6 levels, discarding the first level which contains only 4 general codes. 3 Label Augmentation: In both datasets, we make the assumption that if a label l is assigned to a document then all of its ancestors should also be assigned to this document. Hence, we augment labels by annotating a document with all the ancestors of its assigned labels. For instance, in EUROVOC, if a document is annotated with the label grape it will also be annotated with grape's ancestors, i.e., fruit, plant product, and agri-foodstuffs ( Figure 2 ). This assumption is perfectly valid, while also having the added side effect of providing a more accurate test-bed for evaluation. For example, if a classifier mistakenly annotated the document with citrus fruit, a sibling of grape, in the non-augmented case it would receive a score of zero. By contrast, in the augmented case, assuming it correctly identified all the ancestors of citrus fruit it would receive a much higher score of 0.75 having correctly assigned the three ancestors of grape but not the (more specialized) label itself. Thus, we believe the model is evaluated more fairly in the augmented case with respect to the hierarchy. This type of evaluation is also in-line with the literature on hierarchical classification (Kosmopoulos et al., 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 34, |
|
"text": "(Chalkidis et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 366, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 681, |
|
"end": 682, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1875, |
|
"end": 1901, |
|
"text": "(Kosmopoulos et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1127, |
|
"end": 1135, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "2" |
|
}, |
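For illustration, here is a minimal Python sketch of the label augmentation described above; the parent_of mapping and the helper names are our own and merely stand in for the actual EUROVOC/ICD-9 hierarchies, they are not part of the paper's code:

```python
# Minimal sketch of label augmentation: every assigned label is expanded
# with all of its ancestors in the hierarchy. `parent_of` is a hypothetical
# child -> parent mapping (roots are simply absent from the mapping).
def ancestors(label, parent_of):
    """Return all ancestors of `label`, nearest parent first."""
    chain = []
    parent = parent_of.get(label)
    while parent is not None:
        chain.append(parent)
        parent = parent_of.get(parent)
    return chain

def augment_labels(doc_labels, parent_of):
    """Add the ancestors of every assigned label to the document's label set."""
    augmented = set(doc_labels)
    for label in doc_labels:
        augmented.update(ancestors(label, parent_of))
    return augmented

# Toy EUROVOC-style example from the paper's description:
parent_of = {"grape": "fruit", "fruit": "plant product",
             "plant product": "agri-foodstuffs"}
print(augment_labels({"grape"}, parent_of))
# -> {'grape', 'fruit', 'plant product', 'agri-foodstuffs'}
```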
|
{ |
|
"text": "Before we proceed with the description of our methods ( Figure 1 ), we introduce some notation. Given a label hierarchy L of depth d, L n denotes the set of labels in the n th level of this hierarchy (n d).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 64, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Structured Learning with BERT", |
|
"sec_num": "3" |
|
}, |
|
{

"text": "Also, f i = \u03c3(W i \u2022 c i + b i ) is a classification function, where W i and b i are trainable parameters, c i is the [cls] token in the i th BERT layer, 5 and \u03c3 is the sigmoid activation function. Note that the sizes of W i and b i depend on the number of labels that f i is responsible for predicting, i.e., if f i predicts the labels of L n , W i \u2208 R |Ln|\u00d7768 and b i \u2208 R |Ln|\u00d71 .",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Structured Learning with BERT",

"sec_num": "3"

},
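For concreteness, a minimal TensorFlow 2 sketch of such a per-level head follows (our own illustration, not the authors' released code); it assumes 768-dimensional [cls] vectors, as in BERT-BASE:

```python
import tensorflow as tf

# f_i = sigmoid(W_i . c_i + b_i): one dense sigmoid head per hierarchy level,
# applied to the 768-dimensional [cls] vector c_i of the i-th BERT layer.
class LevelClassifier(tf.keras.layers.Layer):
    def __init__(self, num_labels_in_level, **kwargs):
        super().__init__(**kwargs)
        # W_i in R^{|L_n| x 768}, b_i in R^{|L_n|} (bias stored as a vector).
        self.dense = tf.keras.layers.Dense(num_labels_in_level,
                                           activation="sigmoid")

    def call(self, cls_vector):          # cls_vector: (batch, 768)
        return self.dense(cls_vector)    # (batch, |L_n|) label probabilities

# Example: a head for a hierarchy level with 21 labels (EUROVOC level 1).
f_1 = LevelClassifier(21)
probs = f_1(tf.random.normal((2, 768)))  # shape (2, 21)
```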
|
{ |
|
"text": "FLAT: This is a simple baseline which uses f 12 to predict all labels in the hierarchy in a flat manner. In effect, W 12 \u2208 R |L|\u00d7768 and b i \u2208 R |L|\u00d71 . Note Labels Depth 1 2 3 4 5 6 Micro Macro EURLEX-BERT-BASE #Labels 21 127 567 3,861 2,284 481 7,341 7,341 FLAT 90.3 \u00b1 0.2 83.9 \u00b1 0.3 81.0 \u00b1 0.5 74.8 \u00b1 1.0 74.5 \u00b1 1.2 79.9 \u00b1 1.4 80.6 \u00b1 0.6 80.7 \u00b1 0.6", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 285, |
|
"text": "Labels Depth 1 2 3 4 5 6 Micro Macro EURLEX-BERT-BASE #Labels 21 127 567 3,861 2,284 481 7,341 7,341 FLAT", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Structured Learning with BERT", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "LAST-SIX 90.7 \u00b1 0.1 84.6 \u00b1 0.0 81.9 \u00b1 0.2 76.8 \u00b1 0.2 77.2 \u00b1 0.5 82.2 \u00b1 1.1 81.7 \u00b1 0.1 82.2 \u00b1 0.1 ONE-BY-ONE 90.0 \u00b1 0.0 84.3 \u00b1 0.1 81.7 \u00b1 0.2 76.2 \u00b1 0.2 76.7 \u00b1 0.5 81.6 \u00b1 0.2 81.3 \u00b1 0.1 81.7 \u00b1 0.0 IN-PAIRS 89.9 \u00b1 0.2 84.3 \u00b1 0.2 81.7 \u00b1 0.2 76.7 \u00b1 0.3 77.2 \u00b1 0.6 81.7 \u00b1 0.4 81.4 \u00b1 0.1 81.9 \u00b1 0.4 HYBRID 90.5 \u00b1 0.2 84.3 \u00b1 0.1 81.7 \u00b1 0.2 76.6 \u00b1 0.4 76.6 \u00b1 0.7 81.8 \u00b1 0.7 81.5 \u00b1 0.2 81.9 \u00b1 0.0 76.8 \u00b1 0.2 67.3 \u00b1 0.1 58.5 \u00b1 0.1 51.2 \u00b1 0.0 43.8 \u00b1 0.3 40.9 \u00b1 0.1 60.4 \u00b1 0.0 56.4 \u00b1 0.1 ONE-BY-ONE 75.7 \u00b1 0.0 66.7 \u00b1 0.1 57.9 \u00b1 0.1 50.6 \u00b1 0.1 43.4 \u00b1 0.3 41.8 \u00b1 0.6 59.8 \u00b1 0.1 56.0 \u00b1 0.2 IN-PAIRS 75.6 \u00b1 0.1 66.5 \u00b1 0.1 58.1 \u00b1 0.1 50.9 \u00b1 0.1 43.6 \u00b1 0.3 40.9 \u00b1 0.8 59.9 \u00b1 0.1 55.9 \u00b1 0.2 HYBRID 76.4 \u00b1 0.1 67.0 \u00b1 0.1 58.5 \u00b1 0.1 51.3 \u00b1 0.0 43.8 \u00b1 0.2 40.0 \u00b1 0.4 60.3 \u00b1 0.0 56.2 \u00b1 0.1 (Chalkidis et al., 2020) . However, our results are not directly comparable to Chalkidis et al. (2020) because our methods operate on augmented label sets.", |
|
"cite_spans": [ |
|
|
{ |
|
"start": 846, |
|
"end": 869, |
|
"text": "Chalkidis et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Learning with BERT", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "LAST-SIX: This method uses the classifiers f 7 through f 12 to predict the labels in L 1 through L 6 , respectively. Our intuition is that the layers 1-6 will retain and enhance their pre-trained functionality, i.e., syntactic knowledge, contextualized representations, while layers 7-12 will leverage this knowledge to better solve their individual tasks. We also expect that the model will show higher parameter utilization for the layers 7-12 since they are forced to solve gradually more refined classification tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Learning with BERT", |
|
"sec_num": "3" |
|
}, |
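As an illustration of how LAST-SIX could be wired up, the following sketch uses the Hugging Face Transformers TF API to tie the [cls] vectors of layers 7-12 to six per-level sigmoid heads; the checkpoint name is a placeholder and the label counts are the post-truncation EUROVOC counts (cf. Table 3), so this is an assumption-laden sketch rather than the authors' exact setup:

```python
import tensorflow as tf
from transformers import TFBertModel

# Per-level label counts (EUROVOC levels 1-6 after truncation, cf. Table 3).
labels_per_level = [21, 127, 568, 4545, 2335, 497]

bert = TFBertModel.from_pretrained("bert-base-uncased")  # placeholder checkpoint
heads = [tf.keras.layers.Dense(n, activation="sigmoid")
         for n in labels_per_level]

def last_six_forward(input_ids, attention_mask):
    outputs = bert(input_ids, attention_mask=attention_mask,
                   output_hidden_states=True)
    # hidden_states[0] is the embedding output; hidden_states[1..12] are the
    # 12 transformer layers, so layers 7-12 feed levels 1-6 respectively.
    hidden_states = outputs.hidden_states
    predictions = []
    for level, head in enumerate(heads):          # level 0 -> layer 7, ...
        cls_vec = hidden_states[7 + level][:, 0]  # [cls] vector of that layer
        predictions.append(head(cls_vec))         # (batch, |L_n|)
    return predictions
```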
|
{ |
|
"text": "This method utilizes the full depth of BERT in a \"skip one, use one\" fashion, i.e., it uses classifiers f i , i \u2208 {2, 4, . . . , 12}. In effect, the odd layers (1, 3, . . . , 11) are updated only indirectly, through the classification tasks of the even layers. We expect that the odd layers will learn rich latent representations to facilitate the classifiers of the even layers. Spreading the classification tasks across the whole depth of the model will potentially lead to better parameter utilization. On the other hand, it could harm the model's pre-trained functionality and hence its performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-BY-ONE:", |
|
"sec_num": null |
|
}, |
|
{

"text": "This method also exploits the full depth of BERT, but now the layers are grouped in 6 pairs, p n \u2208 {(1, 2), (3, 4), . . . , (11, 12)}. The classifier responsible for the labels of L n operates on the concatenated [cls] tokens of the corresponding pair, e.g., f 1 = \u03c3(W 1 \u2022 [c 1 ; c 2 ] + b 1 ) is trained on the labels of L 1 . We expect IN-PAIRS to have better parameter utilization than ONE-BY-ONE, although the risk of hindering performance is now even higher.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "IN-PAIRS:",

"sec_num": null

},
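The pair concatenation itself can be sketched in a few lines (again our own illustration; the function and variable names are assumptions based on the description above):

```python
import tensorflow as tf

# IN-PAIRS sketch: the head for level n reads the concatenated [cls] vectors
# of layers 2n-1 and 2n, so its input is 2 * 768 = 1536-dimensional.
def pair_head_forward(cls_odd, cls_even, head):
    """cls_odd, cls_even: (batch, 768) [cls] vectors of one layer pair."""
    pair_repr = tf.concat([cls_odd, cls_even], axis=-1)  # (batch, 1536)
    return head(pair_repr)                               # (batch, |L_n|)

head_level_1 = tf.keras.layers.Dense(21, activation="sigmoid")
c1 = tf.random.normal((2, 768))   # [cls] of layer 1
c2 = tf.random.normal((2, 768))   # [cls] of layer 2
probs_level_1 = pair_head_forward(c1, c2, head_level_1)  # shape (2, 21)
```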
|
{ |
|
"text": "HYBRID: Similarly to LAST-SIX, this method skips some of the lower BERT layers (3 instead of 6). Also, it ties L 1 , L 2 , and L 6 , which are the hierarchy levels with the fewest labels to layers 4, 5, and 12, respectively. Finally, similarly to IN-PAIRS the remaining BERT layers are grouped in pairs and are tied to the rest of the hierarchy levels. We expect the first three layers to retain and enhance their pre-trained functionality, while the hierarchy levels with a large number of labels will benefit from the additional parameters at their disposal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IN-PAIRS:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We report R-Precision (Manning et al., 2009) at each hierarchy level as well as micro (flat) and macro averages across all levels. 6 Table 1 shows the results in both datasets. In EURLEX57K our structured methods always outperform the baseline mostly by a large margin. LAST-SIX achieves the best overall results and is superior than the other structured methods in all hierarchy levels indicating that allowing the lower layers to retain and enhance their pre-trained functionality is crucial. Similar observations can be made for MIMIC-III, but in this case the importance of not damaging BERT's pretrained functionality is even higher, as evident by the only minor improvements ONE-BY-ONE and IN-PAIRS have compared to FLAT. 7 FLAT 4.7 1.3 3.9 0.7 2.5 0.9 3.6 1.7 3.3 2.6 3.2 3.2 3.4 3.1 2.7 2.6 2.1 2.1 2.6 1.8 2.2 1.2 2.4 0.6", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 44, |
|
"text": "(Manning et al., 2009)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 132, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 140, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
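A small, self-contained sketch of R-Precision as defined here (our own helper, not the paper's evaluation script):

```python
def r_precision(gold_labels, scores, label_index):
    """Precision@R, where R is the number of gold labels of the document.

    gold_labels: iterable of gold label ids for one document.
    scores: mapping from label id to predicted score.
    label_index: list of all candidate label ids.
    """
    gold = set(gold_labels)
    r = len(gold)
    if r == 0:
        return 0.0
    ranked = sorted(label_index, key=lambda label: scores[label], reverse=True)
    top_r = ranked[:r]
    return sum(1 for label in top_r if label in gold) / r

# Toy example: 2 gold labels, one of them ranked in the model's top 2.
labels = ["a", "b", "c", "d"]
print(r_precision({"a", "c"}, {"a": 0.9, "b": 0.8, "c": 0.1, "d": 0.0}, labels))
# -> 0.5
```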
|
|
{ |
|
"text": "4.4 1.9 3.9 3.0 3.9 4.6 4.1 4.2 3.2 3.6 3.5 3.5 4.1 2.5 3.5 2.8 3.7 2.0 3.7 1.4 4.2 1.1 5.1 0.2 HYBRID 4.6 2.0 4.5 0.9 4.2 1.7 4.2 2.1 3.8 3.4 3.1 3.7 4.1 3.8 3.8 3.6 4.1 2.8 4.3 2.5 4.1 1.1 4.7 0.3 Figure 3 shows the average angular distances between the [cls] representations of each layer on development data of EURLEX57K. 8 The angular distance is calculated on unit (L 2 normalized) vectors, takes values in [0, 1], and a distance of 0.5 indicates an angle of 90 \u2022 . We observe that ONE-BY-ONE leads to larger angles between the representations than LAST-SIX which in turn yields larger angles than FLAT. In effect, ONE-BY-ONE and to a lesser extent LAST-SIX lead to a better parameter utilization than FLAT. To better support this claim we provide a geometric interpretation. We first L 2 normalize all [cls] representations. Each normalized representation can be interpreted as a vector having its initial point at the origin and its terminal point at the surface of a 768-dimensional hyper-sphere (centered at the origin). The larger the angle between two [cls] vectors the further apart they are on the hyper-sphere's surface. Effectively, [cls] vectors with large angles between them cover a larger subarea of the hyper-sphere's surface indicating that the vector space is utilized to a higher extent which directly implies better parameter utilization.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 207, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "IN-PAIRS", |
|
"sec_num": null |
|
}, |
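A minimal NumPy sketch of this angular distance on L2-normalized vectors (our own code, following the definition above):

```python
import numpy as np

def angular_distance(u, v):
    """Angular distance in [0, 1]; 0.5 corresponds to a 90-degree angle."""
    u = u / np.linalg.norm(u)
    v = v / np.linalg.norm(v)
    cosine = np.clip(np.dot(u, v), -1.0, 1.0)
    return np.arccos(cosine) / np.pi

# Example: orthogonal [cls] vectors are at distance 0.5.
print(angular_distance(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.5
```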
|
{ |
|
"text": "Comparing attention distributions: Figure 4 shows the KL-Divergence of the average (across heads) attention for all layers on the development data of EURLEX57K. 8 A high KL-Divergence indicates that two layers attend to different sub-word units. Moreover, Table 2 reports the entropy (left column per layer) of the average (across heads) attention distribution per layer. A high entropy indicates that a layer attends to more sub-word units. Table 2 also reports the average KL-Divergence (right column per layer) between the attention distributions of each possible pair of heads in a layer. A high KL-Divergence indicates that each head attends to different sub-word units. A first observation is that FLAT attends to almost the same subword units across layers (small entropy differences and KL-Divergence across layers). Interestingly, the different attention heads focus on different subword units only in the middle layers (5-8). On the other hand, all the structured methods show better utilization of the attention mechanism, having higher entropy and KL-Divergence both across heads (Table 2) and across layers (Figure 4 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 43, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 256, |
|
"end": 263, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 442, |
|
"end": 449, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1092, |
|
"end": 1101, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1120, |
|
"end": 1129, |
|
"text": "(Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "IN-PAIRS", |
|
"sec_num": null |
|
}, |
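The two attention statistics can be sketched as follows (our own NumPy code; the small epsilon for numerical stability is an assumption, not specified in the paper):

```python
import numpy as np

EPS = 1e-12  # avoids log(0) for positions that receive no attention

def entropy(p):
    """Entropy of an attention distribution p over sub-word positions."""
    p = p + EPS
    return -np.sum(p * np.log(p))

def kl_divergence(p, q):
    """KL(p || q) between two attention distributions over the same positions."""
    p, q = p + EPS, q + EPS
    return np.sum(p * np.log(p / q))

# Example: average attention of one head over 4 sub-word positions.
p = np.array([0.7, 0.1, 0.1, 0.1])      # peaked: low entropy
q = np.array([0.25, 0.25, 0.25, 0.25])  # uniform: maximal entropy
print(entropy(p), entropy(q), kl_divergence(p, q))
```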
|
{ |
|
"text": "Our approach is similar to Wehrmann et al. 2018but they experiment with fully connected networks, which are not well suited for text classification, contrary to stacked transformers (Vaswani et al., 2017; Devlin et al., 2019) . Similarly, Yan et al. (2015) used Convolutional Neural Networks, albeit with shallow hierarchies (2 levels). Although our approach leverages the label hierarchy it should not be confused with hierarchical classification methods (Silla and Freitas, 2011), which typically employ one classifier per node and cannot scale-up to large hierarchies when considering neural classifiers. A notable exception is the work of You et al. (2019) who employed one bidirectional LSTM with label-wise attention (You et al., 2018) per hierarchy node. However, for their method to scale-up, they use probabilistic label trees (Khandagale et al., 2019) to organize the labels in their own shallow hierarchy which does not follow the abstraction level of the original hierarchy. To the best of our knowledge we are the first to apply this approach to pre-trained language models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 204, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 225, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 256, |
|
"text": "Yan et al. (2015)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 660, |
|
"text": "You et al. (2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 723, |
|
"end": 741, |
|
"text": "(You et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 836, |
|
"end": 861, |
|
"text": "(Khandagale et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We proposed a novel guided approach to fine-tune BERT, where specific layers are tied to specific hierarchy levels. Experimenting with two LMTC datasets, we showed that structured training not only yields better results than a flat baseline, but also leads to better parameter utilization. In the future we will try to further increase the parameter utilization by guiding BERT's attention heads to explicitly focus on specific hierarchy parts. We also plan to improve the explainability of our methods with respect to the utilization of their parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Further Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Hierarchy truncation: In order to directly apply all our methods, we truncate both hierarchies and reduce their depth to six. We believe this truncation is justified since in EUROVOC the last two layers contain a very small number of labels, which are rarely, if at all, assigned and in ICD-9 the first layer also contains a very small number of labels which are very general and can be trivially classified (Table 3). In both cases it seems that only minimal information is lost which would have small practical use in the classification tasks. Document Truncation: Documents in both datasets are often above the 512 token limit of BERT. To reduce document size, we perform a number of pre-processing normalizations, including removal of numeric tokens, punctuation and stop-words. 9 In EURLEX documents have been tokenized using SpaCy's default tokenizer, 10 while in Depth EUROVOC ICD-9 1 21 4* 2 127 79 3 568 589 4 4,545 3,982 5 2,335 9,640 6 497 7,234 7 79* 867 8 6* -Overall 8,178 / 8,093 22,395 / 22,391 Table 3 : Label distribution across EUROVOC and ICD-9 hierarchy levels. Concepts (labels) are arranged from more abstract (level 1-2) to more specialized ones (levels 6-8). Labels with an asterisk are truncated in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 870, |
|
"end": 994, |
|
"text": "Depth EUROVOC ICD-9 1 21 4* 2 127 79 3 568 589 4 4,545 3,982 5 2,335 9,640 6 497 7,234 7 79* 867 8", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1037, |
|
"end": 1044, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Data manipulation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "MIMIC-III, we use regular expressions tailored for the biomedical domain. While document length is severely reduced post normalization, if a document still has a larger number of tokens, i.e. more than 512, we use the first 512 tokens and ignore the rest.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Data manipulation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "All our methods build on BERT-BASE and are implemented in Tensorflow 2. For EURLEX we use the original BERT-BASE (Devlin et al., 2019) , while for MIMIC-III we use SCIBERT (Beltagy et al., 2019) , which has the same architecture (12 layers, 768 hidden units, 12 attention heads), and better suits biomedical documents. 11 Our models are tuned by grid searching three learning rates (2e-5, 3e-5, 5e-5) and two drop-out rates (0, 0.1). We use the Adam optimizer (Kingma and Ba, 2015) with early stopping on validation loss. In preliminary experiments, we found that weighting individual losses with respect to the number of labels in each level is crucial. We therefore weigh each loss by the percentage of labels at the corresponding level, i.e., w n = |Ln| |L| , where |L n | is the number of labels in the n th level of the hierarchy and |L| is the total number of labels across all levels, e.g., in EURLEX57K, w 1 = 21 8093 \u2248 0.0026.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 134, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 194, |
|
"text": "(Beltagy et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Experimental Setup", |
|
"sec_num": null |
|
}, |
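A minimal sketch of this loss weighting (our own code; the label counts are the post-truncation EUROVOC counts from Table 3, and the helper names are ours):

```python
import tensorflow as tf

# Per-level loss weights w_n = |L_n| / |L| (EUROVOC levels 1-6, cf. Table 3).
labels_per_level = [21, 127, 568, 4545, 2335, 497]
total_labels = sum(labels_per_level)                 # 8,093 after truncation
level_weights = [n / total_labels for n in labels_per_level]
bce = tf.keras.losses.BinaryCrossentropy()

def weighted_multilevel_loss(y_true_per_level, y_pred_per_level):
    """Sum of per-level binary cross-entropies, each scaled by w_n."""
    return tf.add_n([w * bce(y_true, y_pred)
                     for w, y_true, y_pred in zip(level_weights,
                                                  y_true_per_level,
                                                  y_pred_per_level)])
```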
|
{ |
|
"text": "The literature of LMTC (Rios and Kavuluru, 2018; Chalkidis et al., 2019) mostly uses information retrieval evaluation measures. We support the premise that when the number of labels is that large the problem mimics retrieval with each document acting as a query and the model having to score relevant labels higher than the rest. However in our study, it would be really confusing to report the standard retrieval metrics Recall@R, Precision@K, nDCG@K since we evaluate our classifiers at each hierarchy depth and reasonable values for K have large fluctuations between levels, as the number of labels per level vastly varies (see Table 3 ). Instead, we prefer R-Precision (Manning et al., 2009) , which is the Precision@R where R is the number of gold labels associated with each document. It follows that R-Precision can neither under-estimate (penalize) nor over-estimate the performance of the models (Chalkidis et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 48, |
|
"text": "(Rios and Kavuluru, 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 49, |
|
"end": 72, |
|
"text": "Chalkidis et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 673, |
|
"end": 695, |
|
"text": "(Manning et al., 2009)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 905, |
|
"end": 929, |
|
"text": "(Chalkidis et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 631, |
|
"end": 638, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C Evaluation in LMTC", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our experiments we observe a hindered performance in MIMIC-III, which can be attributed to a number of characteristics of the dataset. Firstly, documents contain a lot of non-trivial biomedical terminology which naturally makes the classification task more difficult. Further, discharge summaries describe a patient's condition during their hospitalization and therefore proper label annotations change throughout the document as the patient's diagnosis changes or as they exhibit new symptoms, e.g., \"the patient was admitted to the hospital with no heart issues, [. . . ] the patient had a heart failure and died.\". Both the in-domain language and the constant change of events make the dataset more challenging than EURLEX57K, where documents are more organized and well-written also with simpler language. It therefore seems reasonable that in MIMIC-III allowing lower BERT layers to retain and enhance the preliminary functionality, without explicitly guiding them, is of utmost importance. We would like to highlight that even though we use SCIBERT (Beltagy et al., 2019) , which is based on a new scientific vocabulary, we observe that specialized biomedical terms are often over-fragmented in multiple sub-word units, e.g. 'atelectasis' splits into ['ate', '##lect', '##asis']. Thus, the initial layers need to decipher these over-fragmented sub-word units and reconstruct the original word semantics. On the contrary, in EURLEX57K, classifying general concepts in the initial layers, even considering only the sub-word unit embeddings is plausible.", |
|
"cite_spans": [ |
|
{ |
|
"start": 568, |
|
"end": 576, |
|
"text": "[. . . ]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1058, |
|
"end": 1080, |
|
"text": "(Beltagy et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D Peculiarities of MIMIC-III dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We present additional results for the rest of the methods (IN-PAIRS, HYBRID) . Figure 5 shows the average angular distances between the [cls] representations of each layer ( Figure 5 ) for all con- sidered methods. We observe that the distances of IN-PAIRS between consecutive [cls] representations follow a similar pattern with those of ONE-BY-ONE, with the exception of 0.25+ distances which are more dense in the upper layers for IN-PAIRS. This is reasonable, since in IN-PAIRS all layers directly contribute to the classification tasks. The pattern of HYBRID is very similar to ONE-BY-ONE and IN-PAIRS, except for the first three non-guided layers in which distances bear close resemblance to those of the corresponding layers in LAST-SIX. Similar observations hold for MIMIC-III ( Figure 7) . Finally, Figure 6 shows the KL-Divergence of the average (across heads) attention for all layers on the development data. All structured methods show better utilization of the attention mechanism than FLAT, having higher KL-Divergence across layers. Contrary, in MIMIC-III, all structured methods fol- low a similar pattern of low KL-Divergence across layers (Figure 8 ), even lower than the upper layers of FLAT, i.e., the models attend to similar sub-word positions across layers. We aim to further study and explain this behaviour in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 76, |
|
"text": "(IN-PAIRS, HYBRID)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 79, |
|
"end": 87, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 174, |
|
"end": 182, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 786, |
|
"end": 795, |
|
"text": "Figure 7)", |
|
"ref_id": "FIGREF6" |
|
}, |
|
{ |
|
"start": 807, |
|
"end": 815, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 1157, |
|
"end": 1166, |
|
"text": "(Figure 8", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "E Discussion on model utilization", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://eur-lex.europa.eu/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://eurovoc.europa.eu/ 3 For more details on data manipulation see Appendix A. 4 www.who.int/classifications/icd/en/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use BERT-BASE (12 layers, 768 units, 12 heads).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "See Appendices B and C for a detailed description on experimental setup and a discussion on LMTC evaluation.7 This is probably due to the additional difficulties of the clinical domain. See Appendix D for a discussion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For brevity, we only show the heatmaps of three methods. We include the missing ones in Appendix E.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Similar procedures are very common in classification, thus we believe they do not harm text semantics.10 https://spacy.io", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the Transformers library of Huggingface (https: //github.com/huggingface/transformers).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "SciB-ERT: A pretrained language model for scientific text", |
|
"authors": [ |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3606--3611", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciB- ERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3606- 3611, Hong Kong, China.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Large-Scale Multi-Label Text Classification on EU Legislation", |
|
"authors": [ |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanouil", |
|
"middle": [], |
|
"last": "Fergadiotis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6314--6322", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Emmanouil Fergadiotis, Prodromos Malakasiotis, and Ion Androutsopoulos. 2019. Large-Scale Multi-Label Text Classification on EU Legislation. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 6314-6322, Florence, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Sotiris Kotitsas, Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. An empirical study on large-scale multi-label text classification including few and zero-shot labels", |
|
"authors": [ |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manos", |
|
"middle": [], |
|
"last": "Fergadiotis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Manos Fergadiotis, Sotiris Kotitsas, Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. An empirical study on large-scale multi-label text classification including few and zero-shot labels.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "What does BERT look at? an analysis of BERT's attention", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urvashi", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "276--286", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4828" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? an analysis of BERT's attention. In Pro- ceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, abs/1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Assessing bert's syntactic abilities", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg. 2019. Assessing bert's syntactic abili- ties.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A structural probe for finding syntax in word representations", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Hewitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4129--4138", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1419" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Hewitt and Christopher D. Manning. 2019. A structural probe for finding syntax in word repre- sentations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4129-4138, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "2020. exBERT: A Visual Analysis Tool to Explore Learned Representations in Transformer Models", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Hoover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Strobelt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Gehrmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "187--196", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-demos.22" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Hoover, Hendrik Strobelt, and Sebastian Gehrmann. 2020. exBERT: A Visual Analysis Tool to Explore Learned Representations in Transformer Models. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics: System Demonstrations, pages 187-196, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Do attention heads in bert track syntactic dependencies?", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Phu Mon Htut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikha", |
|
"middle": [], |
|
"last": "Phang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bordia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phu Mon Htut, Jason Phang, Shikha Bordia, and Samuel R. Bowman. 2019. Do attention heads in bert track syntactic dependencies?", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "What does BERT learn about the structure of language", |
|
"authors": [ |
|
{ |
|
"first": "Ganesh", |
|
"middle": [], |
|
"last": "Jawahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3651--3657", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1356" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What does BERT learn about the structure of language? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 3651-3657, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "MIMIC-III, a freely accessible critical care database", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Alistair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Stone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Celi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pollard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Nature", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alistair EW Johnson, David J. Stone, Leo A. Celi, and Tom J. Pollard. 2017. MIMIC-III, a freely accessi- ble critical care database. Nature.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Bonsai -Diverse and Shallow Trees for Extreme Multi-label Classification", |
|
"authors": [ |
|
{ |
|
"first": "Sujay", |
|
"middle": [], |
|
"last": "Khandagale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rohit", |
|
"middle": [], |
|
"last": "Babbar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sujay Khandagale, Han Xiao, and Rohit Babbar. 2019. Bonsai -Diverse and Shallow Trees for Extreme Multi-label Classification. CoRR, abs/1904.08249.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jim", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 5th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jim Ba. 2015. Adam: A method for stochastic optimization. In Proceed- ings of the 5th International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Evaluation measures for hierarchical classification: a unified view and novel approaches", |
|
"authors": [ |
|
{ |
|
"first": "Aris", |
|
"middle": [], |
|
"last": "Kosmopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Partalas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Gaussier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Data Mining and Knowledge Discovery", |
|
"volume": "29", |
|
"issue": "3", |
|
"pages": "820--865", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aris Kosmopoulos, Ioannis Partalas, Eric Gaussier, Georgios Paliouras, and Ion Androutsopoulos. 2015. Evaluation measures for hierarchical classification: a unified view and novel approaches. Data Mining and Knowledge Discovery, 29(3):820-865.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Revealing the dark secrets of BERT", |
|
"authors": [ |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Kovaleva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Romanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rumshisky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4365--4374", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1445" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. 2019. Revealing the dark secrets of BERT. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 4365-4374, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Open sesame: Getting inside BERT's linguistic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Yongjie", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chern Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "241--253", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4825" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yongjie Lin, Yi Chern Tan, and Robert Frank. 2019. Open sesame: Getting inside BERT's linguistic knowledge. In Proceedings of the 2019 ACL Work- shop BlackboxNLP: Analyzing and Interpreting Neu- ral Networks for NLP, pages 241-253, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Introduction to Information Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prabhakar", |
|
"middle": [], |
|
"last": "Raghavan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D. Manning, Prabhakar Raghavan, and Hinrich Sch\u00fctze. 2009. Introduction to Information Retrieval. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Few-Shot and Zero-Shot Multi-Label Learning for Structured Label Spaces", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Rios", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramakanth", |
|
"middle": [], |
|
"last": "Kavuluru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3132--3142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anthony Rios and Ramakanth Kavuluru. 2018. Few- Shot and Zero-Shot Multi-Label Learning for Struc- tured Label Spaces. In Proceedings of the 2018 Con- ference on Empirical Methods in Natural Language Processing, pages 3132-3142. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "2020. A primer in bertology: What we know about how bert works", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Kovaleva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rumshisky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.12327" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Rogers, Olga Kovaleva, and Anna Rumshisky. 2020. A primer in bertology: What we know about how bert works. arXiv preprint arXiv:2002.12327.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A survey of hierarchical classification across different application domains", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Carlos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Silla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Freitas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Data Mining and Knowledge Discovery", |
|
"volume": "22", |
|
"issue": "1-2", |
|
"pages": "31--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carlos N Silla and Alex A Freitas. 2011. A survey of hi- erarchical classification across different application domains. Data Mining and Knowledge Discovery, 22(1-2):31-72.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "BERT rediscovers the classical NLP pipeline", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4593--4601", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1452" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019. BERT rediscovers the classical NLP pipeline. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4593- 4601, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Attention Is All You Need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "31th Annual Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. In 31th Annual Conference on Neural Information Processing Systems, Long Beach, CA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Analyzing multi-head self-attention: Specialized heads do the heavy lifting, the rest can be pruned", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Voita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fedor", |
|
"middle": [], |
|
"last": "Moiseev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5797--5808", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1580" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Voita, David Talbot, Fedor Moiseev, Rico Sen- nrich, and Ivan Titov. 2019. Analyzing multi-head self-attention: Specialized heads do the heavy lift- ing, the rest can be pruned. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 5797-5808, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Hierarchical multi-label classification networks", |
|
"authors": [ |
|
{ |
|
"first": "Jonatas", |
|
"middle": [], |
|
"last": "Wehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Cerri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Barros", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "5075--5084", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonatas Wehrmann, Ricardo Cerri, and Rodrigo Bar- ros. 2018. Hierarchical multi-label classification net- works. In Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 5075-5084, Stockholmsm\u00e4ssan, Stockholm Swe- den. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Hd-cnn: Hierarchical deep convolutional neural networks for large scale visual recognition", |
|
"authors": [ |
|
{ |
|
"first": "Zhicheng", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robinson", |
|
"middle": [], |
|
"last": "Piramuthu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vignesh", |
|
"middle": [], |
|
"last": "Jagadeesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dennis", |
|
"middle": [], |
|
"last": "Decoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Di", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhou", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "2015 IEEE International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2740--2748", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhicheng Yan, Hao Zhang, Robinson Piramuthu, Vi- gnesh Jagadeesh, Dennis DeCoste, Wei Di, and Yizhou Yu. 2015. Hd-cnn: Hierarchical deep convo- lutional neural networks for large scale visual recog- nition. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 2740-2748.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Attentionxml: Extreme multi-label text classification with multilabel attention based recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ronghui", |
|
"middle": [], |
|
"last": "You", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suyang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Mamitsuka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shanfeng", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.01727" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronghui You, Suyang Dai, Zihan Zhang, Hiroshi Mamitsuka, and Shanfeng Zhu. 2018. Attentionxml: Extreme multi-label text classification with multi- label attention based recurrent neural networks. arXiv preprint arXiv:1811.01727.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attentionxml: Label tree-based attention-aware deep model for high-performance extreme multi-label text classification", |
|
"authors": [ |
|
{ |
|
"first": "Ronghui", |
|
"middle": [], |
|
"last": "You", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziye", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suyang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Mamitsuka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shanfeng", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "5812--5822", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronghui You, Zihan Zhang, Ziye Wang, Suyang Dai, Hiroshi Mamitsuka, and Shanfeng Zhu. 2019. At- tentionxml: Label tree-based attention-aware deep model for high-performance extreme multi-label text classification. In Advances in Neural Informa- tion Processing Systems 32, pages 5812-5822. Cur- ran Associates, Inc.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The five variants of BERT-based multi-label classifiers including the flat one and the four structured editions. The circles represent the classification layers attached to [cls] tokens across layers." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Examples from EUROVOC label hierarchy. Layerwise models consider all labels in the same level (Li; dashed boxes) of the hierarchy on-par." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Angular distance between [cls] representations across layers." |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "KL-Divergence between attention distributions across layers." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Angular distance between [cls] representations across layers in the development dataset of EURLEX57K." |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "KL-Divergence between attention distributions across layers in the development dataset of EURLEX57K." |
|
}, |
|
"FIGREF6": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Angular distance between [cls] representations across layers in the development dataset of MIMIC-III." |
|
}, |
|
"FIGREF7": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "KL-Divergence between attention distributions across layers in the development dataset of MIMIC-III." |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "R-Precision \u00b1 std for all variants of BERT-BASE on EURLEX and MIMIC-III test data across hierarchy depths.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"text": "Entropy (left column) and KL-Divergence (right column) of attention distributions per layer.", |
|
"html": null, |
|
"content": "<table><tr><td>5 Discussion on model utilization</td></tr></table>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |