ACL-OCL / Base_JSON /prefixL /json /latechclfl /2021.latechclfl-1.11.json
Benjamin Aw
Add updated pkl file v3
6fa4bc9
{
"paper_id": "2021",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T11:58:13.831474Z"
},
"title": "Data-Driven Detection of General Chiasmi Using Lexical and Semantic Features",
"authors": [
{
"first": "Felix",
"middle": [],
"last": "Schneider",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Friedrich Schiller University Jena Jena",
"location": {
"country": "Germany"
}
},
"email": ""
},
{
"first": "Phillip",
"middle": [],
"last": "Brandes",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Friedrich Schiller University Jena Jena",
"location": {
"country": "Germany"
}
},
"email": ""
},
{
"first": "Bj\u00f6rn",
"middle": [],
"last": "Barz",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Friedrich Schiller University Jena Jena",
"location": {
"country": "Germany"
}
},
"email": ""
},
{
"first": "Sophie",
"middle": [],
"last": "Marshall",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Friedrich Schiller University Jena Jena",
"location": {
"country": "Germany"
}
},
"email": ""
},
{
"first": "Joachim",
"middle": [],
"last": "Denzler",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Friedrich Schiller University Jena Jena",
"location": {
"country": "Germany"
}
},
"email": ""
},
{
"first": "Computer",
"middle": [
"Vision"
],
"last": "Group",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Friedrich Schiller University Jena Jena",
"location": {
"country": "Germany"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Automatic detection of stylistic devices is an important tool for literary studies, e.g., for stylometric analysis or argument mining. A particularly striking device is the rhetorical figure called chiasmus, which involves the inversion of semantically or syntactically related words. Existing works focus on a special case of chiasmi that involve identical words in an A B B A pattern, so-called antimetaboles. In contrast, we propose an approach targeting the more general and challenging case A B B' A', where the words A, A' and B, B' constituting the chiasmus do not need to be identical but just related in meaning. To this end, we generalize the established candidate phrase mining strategy from antimetaboles to general chiasmi and propose novel features based on word embeddings and lemmata for capturing both semantic and syntactic information. These features serve as input for a logistic regression classifier, which learns to distinguish between rhetorical chiasmi and coincidental chiastic word orders without special meaning. We evaluate our approach on two datasets consisting of classical German dramas, four texts with annotated chiasmi and 500 unannotated texts. Compared to previous methods for chiasmus detection, our novel features improve the average precision from 17% to 28% and the precision among the top 100 results from 13% to 35%.",
"pdf_parse": {
"paper_id": "2021",
"_pdf_hash": "",
"abstract": [
{
"text": "Automatic detection of stylistic devices is an important tool for literary studies, e.g., for stylometric analysis or argument mining. A particularly striking device is the rhetorical figure called chiasmus, which involves the inversion of semantically or syntactically related words. Existing works focus on a special case of chiasmi that involve identical words in an A B B A pattern, so-called antimetaboles. In contrast, we propose an approach targeting the more general and challenging case A B B' A', where the words A, A' and B, B' constituting the chiasmus do not need to be identical but just related in meaning. To this end, we generalize the established candidate phrase mining strategy from antimetaboles to general chiasmi and propose novel features based on word embeddings and lemmata for capturing both semantic and syntactic information. These features serve as input for a logistic regression classifier, which learns to distinguish between rhetorical chiasmi and coincidental chiastic word orders without special meaning. We evaluate our approach on two datasets consisting of classical German dramas, four texts with annotated chiasmi and 500 unannotated texts. Compared to previous methods for chiasmus detection, our novel features improve the average precision from 17% to 28% and the precision among the top 100 results from 13% to 35%.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Knowledge about the use and distribution of stylistic devices, such as the chiasmus, can be used for stylometric purposes such as attribution to a certain author or genre (Pasanek and Sculley, 2008) . Current computational methods for stylometry focus on analyzing word distributions and do not take more complex stylistic devices into account (Burrows, 2002; Jannidis, 2014; Dimpel et al., 2016) . However, since styles may also differ in their use of stylistic devices, this is an important factor to consider. Different stylistic devices can also be an indicator of the structure of arguments (Mitrovi\u0107 et al., 2017; Lawrence et al., 2017) . Automatic detection of stylistic devices is an important prerequisite for enabling such analyses at scale.",
"cite_spans": [
{
"start": 171,
"end": 198,
"text": "(Pasanek and Sculley, 2008)",
"ref_id": "BIBREF17"
},
{
"start": 344,
"end": 359,
"text": "(Burrows, 2002;",
"ref_id": "BIBREF1"
},
{
"start": 360,
"end": 375,
"text": "Jannidis, 2014;",
"ref_id": "BIBREF11"
},
{
"start": 376,
"end": 396,
"text": "Dimpel et al., 2016)",
"ref_id": "BIBREF2"
},
{
"start": 596,
"end": 619,
"text": "(Mitrovi\u0107 et al., 2017;",
"ref_id": "BIBREF16"
},
{
"start": 620,
"end": 642,
"text": "Lawrence et al., 2017)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The chiasmus is a stylistic device that is found in texts since the antiquity. Chiasmus is defined as an inversion of semantically or syntactically related words, phrases, or sentences in an A B B' A' pattern (Fauser, 1994) . It can be used, for example, to emphasize contrasts. One example for it is:",
"cite_spans": [
{
"start": 209,
"end": 223,
"text": "(Fauser, 1994)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Eng ist die Welt, und das Gehirn ist weit (Narrow is the world, and the brain is wide) Wallensteins Tod (Schiller, 1799) The semantically related words are narrow and wide, as well as world and brain.",
"cite_spans": [
{
"start": 104,
"end": 120,
"text": "(Schiller, 1799)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Existing works for automatic chiasmus detection focus on a special case called antimetabole, which consists of an inversion of identical lemmata (i.e. A=A', B=B'). An example for this is:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Un pour tous, tous pour un (One for all, all for one) Les Trois Mousquetaires (Dumas, 1844) Here, the inverted words are \"all\" and \"one\".",
"cite_spans": [
{
"start": 78,
"end": 91,
"text": "(Dumas, 1844)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In contrast to this, we are interested in finding general chiasmi, where the lemmata can be different. In this work we present the first machine learning based method for finding general chiasmi. Existing methods find candidates by searching for inversions of lemmata. However, such an approach cannot find general chiasmi like the one by Schiller mentioned above, which does not consist of an inversion of lemmata but of semantic concepts.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We propose searching for chiasmus candidates by finding inversions of part-of-speech (PoS) tags in an A B B' A' pattern instead of inversions of lemmata. This requires more sophisticated filtering of the candidates, which include much more false positives without rhetorical intention. To this end, we use a linear classifier and propose a novel set of lexical and semantic features. Our semantic features are based on word embeddings (Mikolov et al., 2013; Bojanowski et al., 2017) to include information about the semantic relationship between the words constituting the chiastic pattern.",
"cite_spans": [
{
"start": 435,
"end": 457,
"text": "(Mikolov et al., 2013;",
"ref_id": "BIBREF15"
},
{
"start": 458,
"end": 482,
"text": "Bojanowski et al., 2017)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Compared with existing methods, our novel features improve the average precision from 17% to 28% in an experiment on four fully annotated dramas by Friedrich Schiller. Further experiments on 493 unannotated texts confirm the usefulness of our automatic general chiasmus detector, which improves the precision among the top 100 rated chiasmus candidates from 13% to 35%.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Only few works address the detection of chiasmi, and the majority of those focus on antimetaboles and not the general case of chiasmus. While it is easy to capture all occurences of antimetabole in a text by just gathering all cross-wise repeating lemmata, this approach also yields a disproportionately high number of false positives (Gawryjolek, 2009) . Thus the main challenge is to remove the false positives from the set of chiasmus candidates.",
"cite_spans": [
{
"start": 335,
"end": 353,
"text": "(Gawryjolek, 2009)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "The approach of Dubremetz and Nivre (2015) introduces a filtering step for the chiasmus candidates. Here, the authors use hand-crafted features based on punctuation and word repetitions together with a maually tuned classifier. They expanded on their work with more features and machine learning Nivre, 2017, 2018) , and trained a Kernel SVM and a linear classifier to detect antimetaboles, which constitutes the current state of the art. In contrast to their method, we search for candidates not based on lemmata but on PoS tags. We furthermore include novel features incorporating lemma information and semantic information about the conceptual similarity of the words constituting the chiasmus.",
"cite_spans": [
{
"start": 16,
"end": 42,
"text": "Dubremetz and Nivre (2015)",
"ref_id": "BIBREF3"
},
{
"start": 296,
"end": 314,
"text": "Nivre, 2017, 2018)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "A first approach for general chiasmus detection was presented by Java (2015) by searching for inversions in syntax trees. However, this misses many true positives and they do not filter their results, leading to numerous false positives as well. In our work, we use inversions of PoS tags with an additional learning-based filtering step to find more candidates and discard more false positives.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Our method consists of two steps; first the detection of suitable chiasmus candidates, second filtering these candidates using machine learning.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Method",
"sec_num": "3"
},
{
"text": "Candidate Extraction First we extract candidates from the text. In contrast to Dubremetz and Nivre (2018), we do not search for inversions of lemmata but of PoS tags. Like them, we limit the candidate phrase length to 30 tokens. With this stragety we find candidates for general chiasmi as well as antimetaboles. However, the candidates do not include lemma information a priori, in contrast to lemma-based candidate mining. We also find more false positive candidates, which raises the performance requirements for the subsequent filtering step. Thus, in addition to the Dubremetz features we include two new types of features in our work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Method",
"sec_num": "3"
},
{
"text": "The features proposed by Dubremetz and Nivre (2018) comprise a combination of various elements like the number of identical words between the supporting words, the number of hard and soft punctiation marks at different positions, grammatical dependencies of the words, and n-gram-repetitions. For a full list of these features please refer to their work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dubremetz Features",
"sec_num": null
},
{
"text": "Lexical Features Since our generalized search procedure based on PoS tags does not consider lemma information anymore, we need to provide this information explicitly for the filter. Therefore, we include a novel set of lexical features to still capture this information. For all six pairs AB, AB', AA', BB', BA', B'A' of supporting tokens, we add a binary feature equaling 1 if the lemmata of the two tokens are identical and 0 otherwise.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dubremetz Features",
"sec_num": null
},
{
"text": "To distinguish true chiasmi from random PoS tag inversions, the semantic relationship between the supporting tokens needs to be considered. We use word embeddings to integrate this semantic information, since they represent words as vectors in a space, where the distance between two vectors signifies their semantic relation (Bojanowski et al., 2017) . For each pair of supporting tokens, we add an embedding feature equaling the cosine similarity (Salton et al., 1975) of the word embeddings of the two tokens.",
"cite_spans": [
{
"start": 326,
"end": 351,
"text": "(Bojanowski et al., 2017)",
"ref_id": "BIBREF0"
},
{
"start": 449,
"end": 470,
"text": "(Salton et al., 1975)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Embedding Features",
"sec_num": null
},
{
"text": "Candidate Ranking We train a logistic regression classifier using the aforementioned features to distinguish true chiasmi from random PoS tag in- versions. For ranking candidates, we use the score obtained from the classifier's decision function.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Embedding Features",
"sec_num": null
},
{
"text": "We perform two types of experiments 1 . First we evaluate the average precision (AP) of different feature combinations using 5-fold cross-validation on an annotated dataset to conduct an ablation study regarding the different feature types. AP describes the area under the precision-recall curve and is common in information retrieval. Additionally, we conduct this experiment on antimetabole candidates used by Dubremetz and Nivre (2018) for a comparison with their approach on their task. In the second experiment, we evaluate how well our model generalizes to texts from different authors not included in the training data. To this end, we extract PoS tag inversions from the GerDra-Cor corpus (Fischer et al., 2019) and retrieve the top-scoring 100 candidates using our pre-trained classifier. These results are evaluated manually to assess the precision among the top 100 results.",
"cite_spans": [
{
"start": 697,
"end": 719,
"text": "(Fischer et al., 2019)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "For PoS tagging, dependencies, and word embeddings we use the de_core_news_lg model from spaCy (Honnibal et al., 2020) . For the English data from Dubremetz and Nivre (2018), we use the spaCy en_core_web_lg model. To create syntax trees, we used CoreNLP (Manning et al., 2014) .",
"cite_spans": [
{
"start": 95,
"end": 118,
"text": "(Honnibal et al., 2020)",
"ref_id": null
},
{
"start": 254,
"end": 276,
"text": "(Manning et al., 2014)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "Feature Combinations Our dataset 2 consists of four annotated texts by Friedrich Schiller, \"Die Piccolomini\", \"Wallensteins Lager\", \"Wallensteins Tod\", and \"Wilhelm Tell\". We annotated the whole texts, finding 45 general chiasmi and 9 antimetaboles. To obtain negative examples, we randomly chose 4000 PoS tag inversions from the 1,006,487 inversions in these texts. Table 1 shows the AP of 5-fold cross-validation conducted with both the antimetaboles, chiasmi, and a combination of both as positive examples. It can be seen that all our additional features improve over the Dubremetz features alone, which are designed for finding antimetaboles in lemma inversions. Adding our novel features improves the detection of general chiasmi in PoS tag inversions. The combination of Dubremetz and lexical features improves the detection especially for antimetaboles. This shows that the lexical features include lemma information that is not present a priori with PoS tag inversions, including the information in the classifier, not in the candidate choice. When using only the novel features, the combination of both improves the results.",
"cite_spans": [],
"ref_spans": [
{
"start": 367,
"end": 374,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "When we only search for chiasmi and exclude the antimetabole examples from the data, the combination of Dubremetz and embedding features yields the best results. Both the combinations of either embedding or all new features with the Dubremetz features improve over the baseline. However, in contrast to the antimetaboles-only experiment, the embedding features bring a stronger improvement than the rest. The combination with lexical features even slightly decreases the AP. This can also be seen when using only the novel features.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "For the experiment with antimetaboles and chiasmi both considered as positive, the combination of the baseline and both novel feature sets or of the baseline and embedding features improves AP the most. The baseline features with the embedding features yield a better performance than the combination with the lexical features, both improving over the baseline. Lexical features are more important for Antimetabole detection, while embedding features help with the general chiasmus detection.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "In addition, we tested our approach on English data used by Dubremetz and Nivre (2018) . This data consist of annotated candidates based on lemmata instead of PoS tags, so only the antimetabole detection can be tested. Since the novel features do not introduce additional information for candidates based on lemma inversions, the new features do not improve the detection. We also tested the approach by Java (2015) on the Combined candidates, yielding a precision of 0.2% and a recall of 5.6%, compared to 10% and 69% with our DLE approach. This shows that in their approach most positive examples are discarded, while due to the missing filtering step many false positives occur.",
"cite_spans": [
{
"start": 60,
"end": 86,
"text": "Dubremetz and Nivre (2018)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "Chiasmi in Unseen Texts Since we have shown that both the novel lexical and embedding features and their combination can help with the task of antimetabole and general chiasmus detection, we evaluate the generalization performance of our chiasmus classifier trained on the four annotated Schiller dramas to other texts. The first set of texts comprises seven other dramas by Friedrich Schiller, which contain a total of 2,822,313 PoS tag inversions and 80,606 lemma inversions. To see how well our method generalizes to different authors, we tested it on the remaining 493 documents from GerDraCor. For evaluation, we annotated the topscoring 100 candidates manually.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "We compare the baseline Dubremetz method based on lemma inversions with the full combination of the Dubremetz, lexical, and embedding features based on PoS tag inversions. Table 2 shows the results of this experiment. We can see that the precision among the top 100 rated candidates improved from 11% to 18% for the Schiller dramas and from 13% to 35% for the rest of the dataset. Interestingly, even the lemma-based baseline method finds some chiasmi. This is mainly due to some words with a common lemma, for example, sich can take the forms of mich and dich in the sentence and thus carry a chiasmus. We furthermore observe that the Dubremetz features together with the PoS tag inversions yield less positive examples than with lemma inversions. Since the number of PoS tag inversions is vastly higher than the number of lemma inversions, this is a much harder problem when the same feature set is used. The results show that our additional novel features are not only able to bridge this gap but also find substantially more chiasmi than the baseline.",
"cite_spans": [],
"ref_spans": [
{
"start": 172,
"end": 179,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "The number of chiasmi compared to the number of PoS tag inversions in a text is extremely small; in our annotated dataset from the previous experiment, only 0.005% of the inversions were chiasmi or antimetaboles. Thus, our approach is suited to find these stylistic devices.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "Since our candidate mining approach also allows all four PoS tags to be identical (i.e., A A A A instead of A B B A), we found that many of the false positives among the top 100 displayed another stylistic device: the parallelism.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "4"
},
{
"text": "We present a novel machine learning approach for a problem seldom addressed: the detection of general chiasmi. First, we extend the candidate mining scheme used by methods for the special case of antimetabole detection to general chiasmi by searching for inversions in PoS tags instead of lemmata. We then propose two new sets of lexical and embedding features, which encode information about the lemmata and word semantics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Our experiments show that our approach outperforms the state of the art. Especially the semantic embedding features proved useful for detecting chiasmi and antimetaboles. Our approach still falls short of human performance, which is expected, as the number of chiasmi in a text is extremely low compared to the number of potential chiasmus candidates.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "An interesting next research step is to test the model on different languages without retraining. Also, applying the method to search for parallelisms using an A B A' B' pattern seems promising.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Code: https://git.io/DetectChiasmus 2 Data: https://git.io/ChiasmusData",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Enriching word vectors with subword information",
"authors": [
{
"first": "Piotr",
"middle": [],
"last": "Bojanowski",
"suffix": ""
},
{
"first": "Edouard",
"middle": [],
"last": "Grave",
"suffix": ""
},
{
"first": "Armand",
"middle": [],
"last": "Joulin",
"suffix": ""
},
{
"first": "Tomas",
"middle": [],
"last": "Mikolov",
"suffix": ""
}
],
"year": 2017,
"venue": "Transactions of the Association for Computational Linguistics",
"volume": "5",
"issue": "",
"pages": "135--146",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Delta': a Measure of Stylistic Difference and a Guide to Likely Authorship",
"authors": [
{
"first": "John",
"middle": [],
"last": "Burrows",
"suffix": ""
}
],
"year": 2002,
"venue": "Literary and Linguistic Computing",
"volume": "17",
"issue": "3",
"pages": "267--287",
"other_ids": {
"DOI": [
"10.1093/llc/17.3.267"
]
},
"num": null,
"urls": [],
"raw_text": "John Burrows. 2002. 'Delta': a Measure of Stylistic Dif- ference and a Guide to Likely Authorship. Literary and Linguistic Computing, 17(3):267-287.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Problematische Autorschaft. Zu ersten Anwendungsversuchen stilometrischer Methoden im Rahmen der Edition der Deutschen Politischen Schriften Zincgrefs. Magazin f\u00fcr digitale Editionswissenschaften",
"authors": [
{
"first": "Friedrich",
"middle": [],
"last": "Michael Dimpel",
"suffix": ""
},
{
"first": "Victoria",
"middle": [],
"last": "Gutsche",
"suffix": ""
},
{
"first": "Ren\u00e9",
"middle": [],
"last": "Wundke",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "2",
"issue": "",
"pages": "23--32",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Friedrich Michael Dimpel, Victoria Gutsche, and Ren\u00e9 Wundke. 2016. Problematische Autorschaft. Zu er- sten Anwendungsversuchen stilometrischer Metho- den im Rahmen der Edition der Deutschen Politis- chen Schriften Zincgrefs. Magazin f\u00fcr digitale Edi- tionswissenschaften, 2:23-32.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Rhetorical figure detection: the case of chiasmus",
"authors": [
{
"first": "Marie",
"middle": [],
"last": "Dubremetz",
"suffix": ""
},
{
"first": "Joakim",
"middle": [],
"last": "Nivre",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the Fourth Workshop on Computational Linguistics for Literature",
"volume": "",
"issue": "",
"pages": "23--31",
"other_ids": {
"DOI": [
"10.3115/v1/W15-0703"
]
},
"num": null,
"urls": [],
"raw_text": "Marie Dubremetz and Joakim Nivre. 2015. Rhetori- cal figure detection: the case of chiasmus. In Pro- ceedings of the Fourth Workshop on Computational Linguistics for Literature, pages 23-31, Denver, Col- orado, USA. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Machine learning for rhetorical figure detection: More chiasmus with less annotation",
"authors": [
{
"first": "Marie",
"middle": [],
"last": "Dubremetz",
"suffix": ""
},
{
"first": "Joakim",
"middle": [],
"last": "Nivre",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the 21st Nordic Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "37--45",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marie Dubremetz and Joakim Nivre. 2017. Machine learning for rhetorical figure detection: More chi- asmus with less annotation. In Proceedings of the 21st Nordic Conference on Computational Linguis- tics, pages 37-45, Gothenburg, Sweden. Association for Computational Linguistics.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Rhetorical figure detection: Chiasmus, epanaphora, epiphora",
"authors": [
{
"first": "Marie",
"middle": [],
"last": "Dubremetz",
"suffix": ""
},
{
"first": "Joakim",
"middle": [],
"last": "Nivre",
"suffix": ""
}
],
"year": 2018,
"venue": "Frontiers in Digital Humanities",
"volume": "5",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.3389/fdigh.2018.00010"
]
},
"num": null,
"urls": [],
"raw_text": "Marie Dubremetz and Joakim Nivre. 2018. Rhetorical figure detection: Chiasmus, epanaphora, epiphora. Frontiers in Digital Humanities, 5:10.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Alexandre Dumas. 1844. Les trois mousquetaires",
"authors": [
{
"first": "Le",
"middle": [],
"last": "Si\u00e8cle",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alexandre Dumas. 1844. Les trois mousquetaires. Le Si\u00e8cle.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Historisches W\u00f6rterbuch der Rhetorik",
"authors": [
{
"first": "Markus",
"middle": [],
"last": "Fauser",
"suffix": ""
}
],
"year": 1994,
"venue": "",
"volume": "2",
"issue": "",
"pages": "171--173",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Markus Fauser. 1994. [Art.] Chiasmus, in Historisches W\u00f6rterbuch der Rhetorik, volume 2, pages 171-173.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Programmable corpora: Introducing dracor, an infrastructure for the research on european drama",
"authors": [
{
"first": "Frank",
"middle": [],
"last": "Fischer",
"suffix": ""
},
{
"first": "Ingo",
"middle": [],
"last": "B\u00f6rner",
"suffix": ""
},
{
"first": "Mathias",
"middle": [],
"last": "G\u00f6bel",
"suffix": ""
},
{
"first": "Angelika",
"middle": [],
"last": "Hechtl",
"suffix": ""
},
{
"first": "Christopher",
"middle": [],
"last": "Kittel",
"suffix": ""
},
{
"first": "Carsten",
"middle": [],
"last": "Milling",
"suffix": ""
},
{
"first": "Peer",
"middle": [],
"last": "Trilcke",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.5281/zenodo.4284002"
]
},
"num": null,
"urls": [],
"raw_text": "Frank Fischer, Ingo B\u00f6rner, Mathias G\u00f6bel, Angelika Hechtl, Christopher Kittel, Carsten Milling, and Peer Trilcke. 2019. Programmable corpora: Introducing dracor, an infrastructure for the research on european drama.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Automated annotation and visualization of rhetorical figures",
"authors": [],
"year": 2009,
"venue": "Jakub",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jakub Jan Gawryjolek. 2009. Automated annotation and visualization of rhetorical figures.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Sofie Van Landeghem, and Adriane Boyd. 2020. spaCy: Industrialstrength Natural Language Processing in Python",
"authors": [
{
"first": "Matthew",
"middle": [],
"last": "Honnibal",
"suffix": ""
},
{
"first": "Ines",
"middle": [],
"last": "Montani",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.5281/zenodo.1212303"
]
},
"num": null,
"urls": [],
"raw_text": "Matthew Honnibal, Ines Montani, Sofie Van Lan- deghem, and Adriane Boyd. 2020. spaCy: Industrial- strength Natural Language Processing in Python.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Der Autor ganz nah -Autorstil in Stilistik und Stilometrie",
"authors": [
{
"first": "Fotis",
"middle": [],
"last": "Jannidis",
"suffix": ""
}
],
"year": 2014,
"venue": "Theorien und Praktiken der Autorschaft",
"volume": "",
"issue": "",
"pages": "169--195",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fotis Jannidis. 2014. Der Autor ganz nah -Autorstil in Stilistik und Stilometrie, in Theorien und Praktiken der Autorschaft, pages 169-195. de Gruyter, Berlin.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Characterization of Prose by Rhetorical Structure for Machine Learning Classification",
"authors": [
{
"first": "James",
"middle": [],
"last": "Java",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "James Java. 2015. Characterization of Prose by Rhetor- ical Structure for Machine Learning Classification. Ph.D. thesis, Nova Southeastern University.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Harnessing rhetorical figures for argument mining",
"authors": [
{
"first": "John",
"middle": [],
"last": "Lawrence",
"suffix": ""
},
{
"first": "Jacky",
"middle": [],
"last": "Visser",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.3233/AAC-170026"
]
},
"num": null,
"urls": [],
"raw_text": "John Lawrence, Jacky Visser, and Chris Reed. 2017. Harnessing rhetorical figures for argument mining.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "The stanford corenlp natural language processing toolkit",
"authors": [
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
},
{
"first": "Mihai",
"middle": [],
"last": "Surdeanu",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Bauer",
"suffix": ""
},
{
"first": "Jenny",
"middle": [
"Rose"
],
"last": "Finkel",
"suffix": ""
},
{
"first": "Steven",
"middle": [],
"last": "Bethard",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Mc-Closky",
"suffix": ""
}
],
"year": 2014,
"venue": "ACL (System Demonstrations)",
"volume": "",
"issue": "",
"pages": "55--60",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Rose Finkel, Steven Bethard, and David Mc- Closky. 2014. The stanford corenlp natural language processing toolkit. In ACL (System Demonstrations), pages 55-60. The Association for Computer Linguis- tics.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Efficient estimation of word representations in vector space",
"authors": [
{
"first": "Tom\u00e1s",
"middle": [],
"last": "Mikolov",
"suffix": ""
},
{
"first": "Kai",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Greg",
"middle": [],
"last": "Corrado",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [],
"last": "Dean",
"suffix": ""
}
],
"year": 2013,
"venue": "1st International Conference on Learning Representations",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tom\u00e1s Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representa- tions in vector space. In 1st International Conference on Learning Representations, ICLR 2013, Scottsdale, Arizona, USA, May 2-4, 2013, Workshop Track Pro- ceedings.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Ontological representations of rhetorical figures for argument mining",
"authors": [
{
"first": "Jelena",
"middle": [],
"last": "Mitrovi\u0107",
"suffix": ""
},
{
"first": "Cliff",
"middle": [],
"last": "O'Reilly",
"suffix": ""
},
{
"first": "Miljana",
"middle": [],
"last": "Mladenovi\u0107",
"suffix": ""
},
{
"first": "Siegfried",
"middle": [],
"last": "Handschuh",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.3233/AAC-170027"
]
},
"num": null,
"urls": [],
"raw_text": "Jelena Mitrovi\u0107, Cliff O'Reilly, Miljana Mladenovi\u0107, and Siegfried Handschuh. 2017. Ontological repre- sentations of rhetorical figures for argument mining.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Mining millions of metaphors. Literary and Linguistic Computing",
"authors": [
{
"first": "Brad",
"middle": [],
"last": "Pasanek",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Sculley",
"suffix": ""
}
],
"year": 2008,
"venue": "",
"volume": "23",
"issue": "",
"pages": "345--360",
"other_ids": {
"DOI": [
"10.1093/llc/fqn010"
]
},
"num": null,
"urls": [],
"raw_text": "Brad Pasanek and D. Sculley. 2008. Mining millions of metaphors. Literary and Linguistic Computing, 23(3):345-360.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "A vector space model for automatic indexing",
"authors": [
{
"first": "G",
"middle": [],
"last": "Salton",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Wong",
"suffix": ""
},
{
"first": "C",
"middle": [
"S"
],
"last": "Yang",
"suffix": ""
}
],
"year": 1975,
"venue": "Commun. ACM",
"volume": "18",
"issue": "11",
"pages": "613--620",
"other_ids": {
"DOI": [
"10.1145/361219.361220"
]
},
"num": null,
"urls": [],
"raw_text": "G. Salton, A. Wong, and C. S. Yang. 1975. A vector space model for automatic indexing. Commun. ACM, 18(11):613-620.",
"links": null
}
},
"ref_entries": {
"TABREF2": {
"content": "<table/>",
"num": null,
"text": "Number of correct examples among the top 100 ranked ones in unseen texts for the Dubremetz method baseline, the PoS inversions with Dubremetz features and the Dubremetz+lexical+embedding (DLE) features.",
"type_str": "table",
"html": null
}
}
}
}