|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:21:42.922913Z" |
|
}, |
|
"title": "A Hybrid Rule-Based and Neural Coreference Resolution System with an Evaluation on Dutch Literature", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Van Cranenburgh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Groningen", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Esther", |
|
"middle": [], |
|
"last": "Ploeger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Groningen", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Van Den Berg", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Groningen", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Th\u00fcss", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Groningen", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We introduce a modular, hybrid coreference resolution system that extends a rule-based baseline with three neural classifiers for the subtasks mention detection, mention attributes (gender, animacy, number), and pronoun resolution. The classifiers substantially increase coreference performance in our experiments with Dutch literature across all metrics on the development set: mention detection, LEA, CoNLL, and especially pronoun accuracy. However, on the test set, the best results are obtained with rule-based pronoun resolution. This inconsistent result highlights that the rulebased system is still a strong baseline, and more work is needed to improve pronoun resolution robustly for this dataset. While end-to-end neural systems require no feature engineering and achieve excellent performance in standard benchmarks with large training sets, our simple hybrid system scales well to long document coreference (>10k words) and attains superior results in our experiments on literature.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We introduce a modular, hybrid coreference resolution system that extends a rule-based baseline with three neural classifiers for the subtasks mention detection, mention attributes (gender, animacy, number), and pronoun resolution. The classifiers substantially increase coreference performance in our experiments with Dutch literature across all metrics on the development set: mention detection, LEA, CoNLL, and especially pronoun accuracy. However, on the test set, the best results are obtained with rule-based pronoun resolution. This inconsistent result highlights that the rulebased system is still a strong baseline, and more work is needed to improve pronoun resolution robustly for this dataset. While end-to-end neural systems require no feature engineering and achieve excellent performance in standard benchmarks with large training sets, our simple hybrid system scales well to long document coreference (>10k words) and attains superior results in our experiments on literature.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "This paper reports on a hybrid rule-based and neural coreference resolution system 1 evaluated on Dutch literary texts. We use neural classifiers for the following three subtasks:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. Mention span identification; 2. Mention attributes: gender, animacy, number; 3. Pronoun resolution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "These subtasks have been selected based on the expected return on investment given the particular weaknesses of the rule-based model (Lee et al., 2017a) and specific challenges of literary coreference (R\u00f6siger et al., 2018) . To keep the approach as simple as possible, we implement these classifiers as independent modules operating in a pipeline.", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 152, |
|
"text": "(Lee et al., 2017a)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 223, |
|
"text": "(R\u00f6siger et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The classifiers can be trained on a laptop without GPU in ten minutes, and are therefore substantially less resource-intensive than state-of-the-art neural models; e.g., SpanBERT (Joshi et al., 2020) requires pre-training a BERT model with span representations on specialized hardware (TPU); moreover, end-to-end neural coreference resolution systems are generally memory and CPU intensive, especially when longer contexts are taken into account (Toshniwal et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 199, |
|
"text": "(Joshi et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 470, |
|
"text": "(Toshniwal et al., 2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The output of coreference resolution is a set of mention spans, partitioned into clusters (example based on Rudinger et al., 2018) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 130, |
|
"text": "Rudinger et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(1) [ ", |
|
"cite_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 5, |
|
"text": "[", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Rule-based coreference resolution provides a reasonable baseline (Lee et al., 2011 (Lee et al., , 2013 , and its advantages are that it can exploit global features of entities based on the whole document. In contrast to end-to-end systems, information from parse trees and named-entity recognition can be used, as well as other components from the Natural Language Processing (NLP) pipeline. Feature-based models also use the NLP pipeline, but use machine learning classifiers that make local decisions (mention-pair and mention-ranking architectures), or attempt to take global context into account, but this runs into computational challenges with long documents. End-to-end neural systems do not need the NLP pipeline and are able to optimize all steps of coreference resolution jointly, which has enabled large advances in standard benchmarks (Lee et al., 2017b (Lee et al., , 2018 Wu et al., 2020) . However, there are several challenges with end-to-end neural models: long documents with long coreference chains (Joshi et al., 2019; Toshniwal et al., 2020) , domain and annotation differences across datasets (Zhu et al., 2021; Poot and van Cranenburgh, 2020) , and needing a large number of training examples (Shalev-Shwartz and Shashua, 2016; Glasmachers, 2017) . Moreover, gender bias is a general challenge in coreference resolution systems (Rudinger et al., 2018; Webster et al., 2018) . Each of these areas is potentially easier to address with a well engineered rule-based or feature-based approach to coreference resolution, and we therefore choose to explore this direction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 82, |
|
"text": "(Lee et al., 2011", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 102, |
|
"text": "(Lee et al., , 2013", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 847, |
|
"end": 865, |
|
"text": "(Lee et al., 2017b", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 866, |
|
"end": 885, |
|
"text": "(Lee et al., , 2018", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 886, |
|
"end": 902, |
|
"text": "Wu et al., 2020)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1018, |
|
"end": 1038, |
|
"text": "(Joshi et al., 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1039, |
|
"end": 1062, |
|
"text": "Toshniwal et al., 2020)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1115, |
|
"end": 1133, |
|
"text": "(Zhu et al., 2021;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1134, |
|
"end": 1165, |
|
"text": "Poot and van Cranenburgh, 2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1216, |
|
"end": 1250, |
|
"text": "(Shalev-Shwartz and Shashua, 2016;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1251, |
|
"end": 1269, |
|
"text": "Glasmachers, 2017)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1351, |
|
"end": 1374, |
|
"text": "(Rudinger et al., 2018;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1375, |
|
"end": 1396, |
|
"text": "Webster et al., 2018)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Hybrid coreference resolution systems have been presented before; Lee et al. (2017a) present a system in which most steps of the rule-based system are implemented with random forest classifiers. They obtain improvements in accuracy and efficiency, but neural systems have since eclipsed these results. Their classifiers include mention detection and pronoun resolution, which we also pursue in this work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 84, |
|
"text": "Lee et al. (2017a)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In addition, previous work shows that neural representations and surface features have complementary strengths (Moosavi and Strube, 2017) . This is another sense in which our system is hybrid: we use both manually selected features as well as contextualized word embeddings. Parts of the neural architecture and features are inspired by Clark and Manning (2016), but we use BERT (Devlin et al., 2019) for embedding features instead of static word embeddings, since BERT representations have shown to bring about significant improvements in natural language tasks that rely on the context. There has been some work on improving detection of mention attributes (animacy, gender, number) using external datasets and machine learning. Bergsma and Lin (2006) extract attributes from a large corpus with dependency parses using heuristic patterns. Orasan and Evans (2007) focus on animacy and use Wordnet and SemCor combined with machine learning. These methods aim to learn general patterns for detecting attributes of noun phrases. In contrast, we will annotate attributes of the entities in our coreference corpus in context and train the classifier on those annotations. We hope to handle more difficult, ambiguous cases which require context with this approach.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 137, |
|
"text": "(Moosavi and Strube, 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 400, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 731, |
|
"end": 753, |
|
"text": "Bergsma and Lin (2006)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 842, |
|
"end": 865, |
|
"text": "Orasan and Evans (2007)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Although most coreference resolution systems are trained and evaluated on domains contained in benchmark datasets, such as news texts and phone conversations in the case of OntoNotes, we train and evaluate our hybrid system on Dutch literature. The reason we are interested in the literary domain is that, while literary texts are increasingly subject to computational analysis in the field of digital humanities, there is still a lot of work required to adapt NLP models to the literary domain, of which coreference resolution is a particularly challenging instance. Importantly, the literary domain contains unique characteristics, such as long coreference chains and dialogue, which do not appear in typical benchmark data for coreference resolution (R\u00f6siger et al., 2018; Bamman et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 753, |
|
"end": 775, |
|
"text": "(R\u00f6siger et al., 2018;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 776, |
|
"end": 796, |
|
"text": "Bamman et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We use RiddleCoref (van Cranenburgh, 2019), with the same train/dev/test splits as used in Poot and van Cranenburgh (2020) . The corpus consists of 162k tokens of contemporary (2007) (2008) (2009) (2010) (2011) (2012) bestselling novels in Dutch (translated and original), with a total of 33 documents (fragments of novels), and an average of 4897.4 tokens per document. The entity coreference annotations follow the dutchcoref annotation guidelines. The 38,466 mentions in the corpus have been manually corrected and exclude non-referring expressions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 122, |
|
"text": "Poot and van Cranenburgh (2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 182, |
|
"text": "(2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 189, |
|
"text": "(2008)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 196, |
|
"text": "(2009)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 203, |
|
"text": "(2010)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 210, |
|
"text": "(2011)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 217, |
|
"text": "(2012)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We did an additional round of corrections on the whole corpus, mostly to fix mention boundaries to exclude relative clauses and remove nonreferring expressions (idioms, verbal expressions, negated mentions). We also made small improvements to the mention detection of the rule-based system: bare nouns in conjunctions are extracted as mentions, and subordinate clauses are removed from mention spans, in accordance with the annotation guidelines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Three mention attributes, namely animacy, gender, and number, have been manually annotated for each of the 11,684 entities in the training and development sets. The gender attribute has four possible values: f (female), m (male), fm (unknown or mixed gender), and n (neuter, non-human). Any gender except neuter implies a human (person) entity; the animacy attribute is therefore implied. Note that Dutch has noun classes with grammatically gendered and neuter words; however, our annotations concern the gender with which individuals are identified. For example, the noun phrase het meisje (the girl) is grammatically neuter, but annotated as female, since it would be referred to by female pronouns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The number attribute has two possible values: sg (singular) and pl (plural; an entity consisting of multiple individuals/objects). We annotate the semantic number (e.g., \"the group\" is plural since it is a collective noun that could be referred to by \"they\"), regardless of the syntactic number.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The base system is a rule-based coreference resolution system (van Cranenburgh, 2019) which takes parse trees as input. We extend this system with neural classifiers for three subtasks; see Figure 1 for an overview of our hybrid system.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 198, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The rule-based system starts by extracting mention candidates from parse trees based on rules. Mention attributes are heuristically assigned based on parse tree features and lexical resources. Mentions are then linked into entity clusters using several \"sieves\" for linking nominals, names, and finally pronouns. The pronoun resolution step is an implementation of the Hobbs (1978) system using heuristics of recency and syntactic prominence. We use a feed-forward neural network classifier for the three subtasks (see Figure 2 ). The input consists of BERT token embeddings and several handpicked features. The network has two dense hidden layers with 500 and 150 neurons, respectively, both with ReLu activation and batch normalization. The output layer is a sigmoid function with the respective binary classification for the subtask and L 2 regularization of 0.05. We apply a dropout of 0.2 to the input layer and 0.5 to each hidden layer and fit the networks with a batch size of 32 and Adam with a learning rate of 0.0001. Each subtask is trained with early stopping until there are 5 suc- ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 519, |
|
"end": 527, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Rule-based system", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "ReLU(W1h0 + b1) ReLU(W2h1 + b2) ReLU(W3h2 + b3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-based system", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Pair and Document Features Figure 2 : The mention-pair encoder for the pronoun resolution model; the other modules take a single mention as input; figure from Clark and Manning (2016) cessive epochs that do not show an improvement on the validation set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 183, |
|
"text": "Clark and Manning (2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 35, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Rule-based system", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "BERT embeddings are produced using the monolingual, pre-trained BERTje model (de Vries et al., 2019) . We use the BERT token embeddings from layer 9, since that layer was shown to perform best for the task of coreference resolution in Dutch (de Vries et al., 2020). For mentions consisting of multiple BERT tokens, we use the mean of the embeddings of all tokens as the mention representation. While neural systems (e.g., Lee et al., 2017b; Bamman et al., 2020) often use a recurrent layer (e.g., LSTM) to obtain contextualized representations of mentions, we follow Joshi et al. (2019) in using BERT embeddings directly. Unlike Joshi et al. (2019), we do not encode BERT embeddings for segments of multiple sentences, but encode each sentence independently.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 100, |
|
"text": "(de Vries et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 440, |
|
"text": "Lee et al., 2017b;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 461, |
|
"text": "Bamman et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rule-based system", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To improve mention detection, we implement a mention span classifier that picks the best mention span from a list of candidates for a given head word (similar to Lee et al., 2017a) , or classifies the spans as non-referring if none of them have a probability higher than a threshold (set at > 0.3 in our experiments based on experiments with development data).", |
|
"cite_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 180, |
|
"text": "Lee et al., 2017a)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Span Classifier", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Candidates are extracted based on the same syntactic rules as in van Cranenburgh (2019), but include alternative, shorter spans as candidates as well. Since mention spans that incorrectly include an adverb in the first position have been observed frequently in previous work (van Cranenburgh, 2019), we ensure that for each span (n, m), the span (n+1, m) is also considered. The system is trained on gold mention spans annotated in the corpus, as well as negative examples extracted from the parse trees. During evaluation and prediction, only spans extracted by rules are used as candidates. While it would be possible to train a classifier that works with arbitrary spans as candidates, the rest of the rule-based system depends on parse tree features, and we therefore stick with candidates extracted by parse tree queries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Span Classifier", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The features presented to the neural network are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Span Classifier", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "1. The BERT token embeddings of the first and last token of the span; 2. Whether the rule-based mention detection would extract the span as mention; 3. The grammatical function of the constituent matching the span (subject, object, predicative, apposition); 4. Whether the span contains another NP; 5. Whether the head word of the span is a named entity (PER/LOC/ORG/MISC); 6. The POS tag of the head word (noun, name, pronoun, or verb), the first word (adverb, adjective, punctuation) and the last word (punctuation); 7. The number of words in the mention, histogrammed as in Clark and Manning (2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 577, |
|
"end": 601, |
|
"text": "Clark and Manning (2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Span Classifier", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Given the input of example (1), the candidates are (correct mention underlined): {De chirurg, chirurg}, {de pati\u00ebnt, pati\u00ebnt}, . . . , {mijn zoon, zoon} We also experimented with adding an anaphoricity classifier (e.g., Clark and Manning, 2016; Moosavi and Strube, 2017) , but initial experiments did not improve the results, so we leave this for future work. Moreover, mentions could also be classified as singleton or coreferent; however, following Lee et al. (2017a), we have not pursued this, since it is better to leave this decision to later sieves, at which point more global information is available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 244, |
|
"text": "Clark and Manning, 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 270, |
|
"text": "Moosavi and Strube, 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Span Classifier", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The rule-based system (van Cranenburgh, 2019) detects mention attributes heuristically using parse tree features and several lexicons and lexical resources:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "1. Named-entity category and grammatical features; 2. a list of the most common Dutch first names for men and women; 3. gender and animacy attributes for nouns from the Dutch Wordnet equivalent Cornetto;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "4. and heuristic number and gender frequencies derived from English web text (Bergsma and Lin, 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 100, |
|
"text": "(Bergsma and Lin, 2006)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "To improve the detected attributes, we train a supervised classifier that predicts these attributes for a given mention in a sentence. We manually annotated the mention attributes for each entity in the corpus based on the whole coreference chain. During training, we train and predict these entity attributes for each mention. This means that some data points will be difficult, e.g., predicting the gender of the mention \"the person\" is not possible without further context. Similarly, ze is both a third person singular female pronoun as well as a third person plural pronoun; when not in subject position, gender and number are ambiguous. In early experiments, attributes were only predicted for names and nominals; however, predicting attributes for all mention types (i.e., including pronouns) substantially improved performance. Furthermore, annotating and predicting number, despite being relatively reliably marked syntactically, also boosts performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The task is set up as a multi-label classification task such that a mention is assigned probabilities for all possible labels; this multi-task setup means that attributes are trained and predicted jointly. For each attribute, we assign all labels with a probability > 0.5. Experimenting with different thresholds did not improve results. Given this setup, it is possible for the classifier to predict no attributes for a mention, which is interpreted as the attributes being unknown by the system; or a combination of features such as female and neuter, which is not part of the annotations, this is again interpreted as an uncertain feature by the rule-based system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The features from which the neural network predicts mention attributes are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "1. The averaged BERT token embeddings for the mention; 2. The heuristically detected attributes for gender, animacy, and number; 3. Whether the mention is a subject or object; 4. Whether the mention contains another NP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Given the input of (1), the expected output is: De chirurg: fm, sg; de pati\u00ebnt: fm, sg; Hij: m, sg; haar f, sg; haar zoon: m, sg. However, based on the context, the predicted gender of the first two mentions could be more specific. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Attributes Classifier", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We train a binary classifier on predicting whether a pair of mentions is coreferent; i.e., a mention-pair architecture (Soon et al., 2001) . Pairs consist of a pronoun and antecedent candidate. The pronoun is a third person personal, possessive, indefinite, or demonstrative pronoun. The antecedent candidate is a mention within the preceding 22 mentions words relative to the pronoun (this distance is applied during both training and prediction). Mentions with a grammatical function of appositive or determiner are not considered as candidates, since these often lead to incorrect links. We also filter out mention pairs based on binding constraints (iwithin-i and co-argument restrictions). Mention pairs are assigned a probability. For each pronoun, the candidate with the highest probability is selected as its antecedent, unless the highest probability is less than a threshold, in which case no antecedent is selected. Based on experiments with the development data, we set the threshold at 0.2. The features given to the neural network are as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 138, |
|
"text": "(Soon et al., 2001)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pronoun Resolution Classifier", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "1. The averaged BERT token embeddings for the pronoun, and for the candidate; 2. Candidate mention type (pronoun, noun, name); 3. Whether the grammatical function (subj, obj, etc.) of the pair is the same; 4. Attribute compatibility (gender, animacy, number); 5. Person (1, 2, 3) of candidate, if it is a pronoun; 6. Whether pronoun or candidate occurs in quoted speech; 7. Distance in sentences and words between pronoun and candidate; number of words in candidate. The distances and lengths are histogrammed as in Clark and Manning (2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 516, |
|
"end": 540, |
|
"text": "Clark and Manning (2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pronoun Resolution Classifier", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Given the input of (1), the candidates are (correct antecedent underlined) noun type, position in sentence) actually decreased development scores in our experiments, which is why we end up with this relatively small list of features. We also considered frequency features: how frequent is the candidate entity in the preceding context or whole document. We have not pursued this since it complicates the implementation as it makes predictions dependent on previous predictions. Another feature which is left for future work is incorporating external knowledge on selectional preferences, as used successfully by Zhang et al. (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 612, |
|
"end": 631, |
|
"text": "Zhang et al. (2019)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pronoun Resolution Classifier", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We first report the results for each module on the development set, and then report the results for the system in various configurations on the development and test sets. The results for the mention span classifier are shown in Table 1 . We obtain a decent improvement over the rule-based method: a difference of 1.4% F1 points for mentions, mainly due to higher precision. The mention recall is limited by the rules for mention candidate extraction and parse tree errors.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 235, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The results for the mention attributes classifier are shown in Table 2 . We obtain a solid improvement over the baseline, with a macro averaged F1 improvement of 8.3% points and consistent improvement for each label. Female mentions show the largest improvement, but also remain the most difficult to detect. There is a striking contrast with male and neuter mentions, which show higher scores. Animacy detection is also improved substantially, and number to a lesser extent, since the baseline is already high for this attribute.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 70, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For coreference evaluation, we use the averaged CoNLL score (Pradhan et al., 2011) and the LEA coreference metric (Moosavi and Strube, 2016; Moosavi et al., 2019) . In addition we report mention scores and pronoun accuracy. Pronoun accuracy includes demonstrative and indefinite pronouns in addition to third person personal and possessive pronouns.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 82, |
|
"text": "(Pradhan et al., 2011)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 140, |
|
"text": "(Moosavi and Strube, 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 162, |
|
"text": "Moosavi et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "See Table 3 for the main coreference results, presented incrementally. The original rule-based model is listed as \"dutchcoref\", with the modules proposed in this paper listed as span (mention span classifier), attr (mention attributes classifier), and pron (pronoun resolution), respectively. The line \"dutchcoref+span\" means that the mention span classifier is used, but the rest of the system remains rulebased. For transparency, we report results both on the development and test sets; the parameters and models were tuned only on the development set. Since the annotations and the rule-based system were improved, we report results from Poot and van Cranenburgh (2020) for comparison. Each neural module improves performance scores on the development set, across all metrics. Unfortunately, on the test set the results are less consistent. On several metrics, the rule-based \"dutchcoref\" performs best, while the pronoun resolution classifier does not improve the pronoun accuracy with respect to the previous line \"dutchcoref+span,attr\" (which shows results for the rule-based model with neural mention detection and mention attributes, but rule-based pronoun resolution); however, the mention span and attributes modules perform well.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 11, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In order to isolate the effect of mention detection (which is known to introduce pipeline errors), we also perform an evaluation on the test set with gold mentions, see Table 4 . Here we find that the mention attribute classifier improves the performance across the board. Again the pronoun resolution module does not improve the results compared to the result for 'dutchcoref+attr' (it does improve the results compared with the purely rule-based system).",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 176, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "All our evaluations include singletons; evaluating without singletons does not change the ranking of the systems on each metric. We conclude that the mention attribute classifier robustly improves the performance, but that the pronoun resolution classifier yields inconsistent results. Finally, while the result is puzzling, a similar result was reported by Poot and van Cranenburgh (2020) , where the rule-based system performed better on the test set than on the development set, while the end-to-end neural system showed the opposite effect (better on development set than on test set).", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 389, |
|
"text": "Poot and van Cranenburgh (2020)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "It could be that the development and test set differ in difficulty. We consider several basic statistics to compare the two sets. We first consider differences in the out-of-vocabulary (OOV) rate and word frequencies with the Jensen-Shannon distance. We find that the development set actually has a lower OOV rate than the test set, with respect to the training set (16.3% and 13.3%, respectively).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Differences", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "The Jensen-Shannon distance shows the same pattern (0.307 vs 0.290, respectively). Moreover, the average sentence length is similar between the development and test sets (18.39 and 18.26, respectively), but higher than the train set (15.51). The development set does have a lower number of mentions (6548 vs 6869) and entities (2643 vs 3008). Finally, the development set has a higher percentage of names (14.9 vs 9.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Differences", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Genre is another potential explanation for the difference. There are four different genres in the RiddleCoref dataset: (Literary) Fiction, Suspense, Romance, and Other. The development set contains 4 Fiction and 1 Other novel, while the test set contains 3 Fiction, 1 Romance, and 1 Suspense novel. We now take a closer look at the difficulty of these genres using an out-of-domain training set. We evaluate the pronoun resolution module on each genre in RiddleCoref; the results are in Table 5 . We evaluate on two novels for each genre. The two documents of the Other genre are three chapters from Harry Potter and The Hunger Games, and are therefore considerably longer than the other documents. Since these documents from varying genres all originate from the RiddleCoref training set, we train the pronoun resolution model on a different corpus: SoNaR-1 (Schuurman et al., 2010) . This model achieved a CoNLL score of 70.76 on the RiddleCoref test set, which is comparable to that of the model trained on RiddleCoref. This is in line with our expectations: on the one hand, SoNaR-1 is much larger with 1 million tokens, providing more training data, but on the other hand, there is a difference in domain. Moreover, as noted by van Cranenburgh (2019) and Poot and van Cranenburgh (2020) scheme of RiddleCoref and SoNaR-1; however, for pronoun resolution, these differences do not prevent the model from achieving a decent score. Comparing the results for the different genres in Table 5 reveals that the genre Fiction resulted in the lowest scores and Suspense resulted in the best scores, the difference being 4.69 percentage points in the CoNLL score. It is quite noticeable that the genres with more tokens in this experiment performed worse. This is in line with the performance of the end-to-end neural model from Poot and van Cranenburgh (2020) where a similar effect was noticed. \nFurthermore there does not seem to be a clear correlation between the percentage of pronouns and the CoNLL score in this experiment. As some of the most common link errors involve pronouns, genres with more pronouns were expected to result in lower scores. This, however, does not seem to be the case in this sample, as the second best performing genre contained the highest percentage of pronouns and the worst performing genre contained the lowest percentage of pronouns. Lastly, the length of a sentence does not seem to have a substantial effect on the scores. A longer sentence could possibly be more complex with more mentions and therefore create more room for mistakes for the model, how- ever this does not seem to be the case here. The genre with the highest number of tokens per sentence actually performed the best. Figure 3 shows training curves for the pronoun resolution model (i.e., these results do not include the mention span and attribute modules), with predicted mentions. From the curve we see the benefit of more training data for pronoun resolution accuracy (and to a lesser extent for the overall CoNLL score). Since the training curve keeps rising without reaching a plateau, we expect that adding more training data will improve pronoun resolution more. Still, since the curve gradually becomes less steep with supplying more data, we expect that there will be diminishing returns as more annotated training data is added.",
|
"cite_spans": [ |
|
{ |
|
"start": 859, |
|
"end": 883, |
|
"text": "(Schuurman et al., 2010)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1260, |
|
"end": 1291, |
|
"text": "Poot and van Cranenburgh (2020)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 487, |
|
"end": 494, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 1484, |
|
"end": 1491, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 2720, |
|
"end": 2728, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Differences", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "The results for the mention attribute classifier show that recognizing female mentions is most difficult. We suspect that the difficulty may lie in mentions that can be both male or female, in which the system may assume male as the most likely label. As a simple probe for gender bias, we experiment with the running example (1) which is a Winogender sentence (Rudinger et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 384, |
|
"text": "(Rudinger et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probing for gender bias", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Interestingly, the rule-based system correctly identifies De chirurg (the surgeon) as male or female (based on the Cornetto lexical resource), while the neural mention attribute classifier predicts it as male. How did this gender bias get introduced? The training data contains only one instance of chirurg, which is correctly annotated as male or female, since the context does not identify the surgeon's gender. Another potential source of gender bias is the BERT embeddings. If we present BERT with the following sentence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probing for gender bias", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "(2) De chirurg kon [MASK] pati\u00ebnt niet behandelen.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probing for gender bias", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "The surgeon couldn't treat [MASK] patient.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 33, |
|
"text": "[MASK]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probing for gender bias", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We find that BERT considers de, zijn (the, his) as overwhelmingly most probable, with een, deze, die (a, this, that) as distant runner ups, but no female possessive pronoun in the top 5. We therefore conclude that the pre-trained BERTje model has introduced a source of gender bias, which is in line with previous results for Dutch (Ch\u00e1vez Mulsa and Spanakis, 2020) . Unless an effective bias mitigation technique is applied, this presents a dilemma: the goal is either to maximize overall accuracy, in which case for example the gender most commonly associated with an occupation is assumed, or gender bias is removed using constraints that lower overall performance. Moreover, while the mention attribute classifier mistakenly classifies De chirurg (the surgeon) as male, the neural pronoun resolution module ignores this misclassification, and correctly links haar. This demonstrates the advantage of the neural classifiers which exploit mention attributes as features, but do not treat them as hard constraints, as the rule-based pronoun resolution sieve does.", |
|
"cite_spans": [ |
|
{ |
|
"start": 332, |
|
"end": 365, |
|
"text": "(Ch\u00e1vez Mulsa and Spanakis, 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probing for gender bias", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We have presented a hybrid coreference resolution system that extends a rule-based baseline with three simple neural classifiers. The classifiers substantially increase the coreference performance in our experiments on Dutch literature, except for pronoun resolution on the test set. The strongest improvement is on pronoun accuracy, which is especially important in longform narrative text.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "There are several areas in which the system can be improved. In our approach we erred on the side of simplicity, but in the case of pronoun resolution the approach was too simple, leading to an improvement on the development set but not on the test set. The simplicity can be relaxed in several ways. The modules are trained with gold standard input, but using predictions of previous modules may give better results. If possible, the modules should be trained jointly. Adding more and more varied training data, such as from SoNaR-1 can be expected to yield better results. BERT performs better when finetuned and when encoding segments of 128 to-kens, as reported by Joshi et al. (2019) . Finally, other modules could be added. Anaphoricity classifiers are used in most state-of-the-art systems. In literature, dialogue is particularly important; annotating and predicting speakers of direct speech will help in resolving first and second person pronouns.", |
|
"cite_spans": [ |
|
{ |
|
"start": 669, |
|
"end": 688, |
|
"text": "Joshi et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Future work should investigate in more detail the trade-offs between rule-based systems using an NLP pipeline and modern end-to-end neural models, especially in the challenging case of longdocument coreference in narrative text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Code and models are available at https://github.com/ andreasvc/dutchcoref", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We are grateful to Tommaso Caselli and three anonymous reviewers for helpful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "An annotated dataset of coreference in English literature", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Bamman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivia", |
|
"middle": [], |
|
"last": "Lewke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anya", |
|
"middle": [], |
|
"last": "Mansoor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "44--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Bamman, Olivia Lewke, and Anya Mansoor. 2020. An annotated dataset of coreference in English literature. In Proceedings of LREC, pages 44-54.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Bootstrapping path-based pronoun resolution", |
|
"authors": [ |
|
{ |
|
"first": "Shane", |
|
"middle": [], |
|
"last": "Bergsma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of COLING-ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--40", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1220175.1220180" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shane Bergsma and Dekang Lin. 2006. Bootstrapping path-based pronoun resolution. In Proceedings of COLING-ACL, pages 33-40.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Evaluating bias in Dutch word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [ |
|
"Alejandro" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ch\u00e1vez", |
|
"middle": [], |
|
"last": "Mulsa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerasimos", |
|
"middle": [], |
|
"last": "Spanakis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of GeBNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "56--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodrigo Alejandro Ch\u00e1vez Mulsa and Gerasimos Spanakis. 2020. Evaluating bias in Dutch word em- beddings. In Proceedings of GeBNLP, pages 56-71.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Improving coreference resolution by learning entitylevel distributed representations", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "643--653", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1061" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark and Christopher D. Manning. 2016. Im- proving coreference resolution by learning entity- level distributed representations. In Proceedings of ACL, pages 643-653.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "BERTje: A Dutch BERT model", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Wietse De Vries", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arianna", |
|
"middle": [], |
|
"last": "Van Cranenburgh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommaso", |
|
"middle": [], |
|
"last": "Bisazza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Caselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Malvina", |
|
"middle": [], |
|
"last": "Gertjan Van Noord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nissim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1912.09582" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wietse de Vries, Andreas van Cranenburgh, Arianna Bisazza, Tommaso Caselli, Gertjan van Noord, and Malvina Nissim. 2019. BERTje: A Dutch BERT model. arXiv:1912.09582.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "What's so special about BERT's layers? A closer look at the NLP pipeline in monolingual and multilingual models", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Wietse De Vries", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Malvina", |
|
"middle": [], |
|
"last": "Van Cranenburgh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nissim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4339--4350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wietse de Vries, Andreas van Cranenburgh, and Malv- ina Nissim. 2020. What's so special about BERT's layers? A closer look at the NLP pipeline in mono- lingual and multilingual models. In Findings of EMNLP, pages 4339-4350.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of NAACL, pages 4171- 4186.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Limits of end-to-end learning", |
|
"authors": [ |
|
{ |
|
"first": "Tobias", |
|
"middle": [], |
|
"last": "Glasmachers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Asian Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tobias Glasmachers. 2017. Limits of end-to-end learn- ing. In Asian Conference on Machine Learning, pages 17-32. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Resolving pronoun references", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Jerry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hobbs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1978, |
|
"venue": "Lingua", |
|
"volume": "44", |
|
"issue": "4", |
|
"pages": "311--338", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/0024-3841(78)90006-2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jerry R Hobbs. 1978. Resolving pronoun references. Lingua, 44(4):311-338.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Span-BERT: Improving pre-training by representing and predicting spans", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "64--77", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00300" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S. Weld, Luke Zettlemoyer, and Omer Levy. 2020. Span- BERT: Improving pre-training by representing and predicting spans. Transactions of the Association for Computational Linguistics, 8:64-77.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "BERT for coreference resolution: Baselines and analysis", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of EMNLP-IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5807--5812", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Omer Levy, Luke Zettlemoyer, and Daniel Weld. 2019. BERT for coreference resolu- tion: Baselines and analysis. In Proceedings of EMNLP-IJCNLP, pages 5807-5812.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Deterministic coreference resolution based on entity-centric, precision-ranked rules", |
|
"authors": [ |
|
{ |
|
"first": "Heeyoung", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angel", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Peirsman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics", |
|
"volume": "39", |
|
"issue": "4", |
|
"pages": "885--916", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/COLI_a_00152" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heeyoung Lee, Angel Chang, Yves Peirsman, Nathanael Chambers, Mihai Surdeanu, and Dan Jurafsky. 2013. Deterministic coreference resolu- tion based on entity-centric, precision-ranked rules. Computational Linguistics, 39(4):885-916.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Stanford's multi-pass sieve coreference resolution system at the CoNLL-2011 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Heeyoung", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Peirsman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angel", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "28--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heeyoung Lee, Yves Peirsman, Angel Chang, Nathanael Chambers, Mihai Surdeanu, and Dan Jurafsky. 2011. Stanford's multi-pass sieve corefer- ence resolution system at the CoNLL-2011 shared task. In Proceedings of CoNLL, pages 28-34.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A scaffolding approach to coreference resolution integrating statistical and rule-based models", |
|
"authors": [ |
|
{ |
|
"first": "Heeyoung", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Natural Language Engineering", |
|
"volume": "23", |
|
"issue": "5", |
|
"pages": "733--762", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1017/S1351324917000109" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heeyoung Lee, Mihai Surdeanu, and Dan Jurafsky. 2017a. A scaffolding approach to coreference res- olution integrating statistical and rule-based models. Natural Language Engineering, 23(5):733-762.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "End-to-end neural coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--197", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Luheng He, Mike Lewis, and Luke Zettle- moyer. 2017b. End-to-end neural coreference reso- lution. In Proceedings of EMNLP, pages 188-197.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Higher-order coreference resolution with coarse-tofine inference", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "687--692", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2108" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Luheng He, and Luke Zettlemoyer. 2018. Higher-order coreference resolution with coarse-to- fine inference. In Proceedings of NAACL, pages 687- 692.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Using automatically extracted minimum spans to disentangle coreference evaluation from boundary detection", |
|
"authors": [ |
|
{ |
|
"first": "Nafise Sadat", |
|
"middle": [], |
|
"last": "Moosavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Born", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4168--4178", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nafise Sadat Moosavi, Leo Born, Massimo Poesio, and Michael Strube. 2019. Using automatically ex- tracted minimum spans to disentangle coreference evaluation from boundary detection. In Proceedings of ACL, pages 4168-4178.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Which coreference evaluation metric do you trust? A proposal for a link-based entity aware metric", |
|
"authors": [ |
|
{ |
|
"first": "Sadat", |
|
"middle": [], |
|
"last": "Nafise", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Moosavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1060" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nafise Sadat Moosavi and Michael Strube. 2016. Which coreference evaluation metric do you trust? A proposal for a link-based entity aware metric. In Proceedings of ACL, pages 632-642.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Use generalized representations, but do not forget surface features", |
|
"authors": [ |
|
{ |
|
"first": "Sadat", |
|
"middle": [], |
|
"last": "Nafise", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Moosavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of CORBON", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-1501" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nafise Sadat Moosavi and Michael Strube. 2017. Use generalized representations, but do not forget surface features. In Proceedings of CORBON, pages 1-7.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "NP animacy identification for anaphora resolution", |
|
"authors": [ |
|
{ |
|
"first": "Constantin", |
|
"middle": [], |
|
"last": "Orasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": ["J"], |
|
"last": "Evans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "79--103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Constantin Orasan and Richard J Evans. 2007. NP ani- macy identification for anaphora resolution. Journal of Artificial Intelligence Research, 29:79-103.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A benchmark of rule-based and neural coreference resolution in Dutch novels and news", |
|
"authors": [ |
|
{ |
|
"first": "Corb\u00e8n", |
|
"middle": [], |
|
"last": "Poot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Van Cranenburgh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of CRAC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--90", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corb\u00e8n Poot and Andreas van Cranenburgh. 2020. A benchmark of rule-based and neural coreference res- olution in Dutch novels and news. In Proceedings of CRAC, pages 79-90.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "CoNLL-2011 shared task: Modeling unrestricted coreference in OntoNotes", |
|
"authors": [ |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer Pradhan, Lance Ramshaw, Mitchell Marcus, Martha Palmer, Ralph Weischedel, and Nianwen Xue. 2011. CoNLL-2011 shared task: Modeling un- restricted coreference in OntoNotes. In Proceedings of CoNLL, pages 1-27.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Towards coreference for literary text: Analyzing domain-specific phenomena", |
|
"authors": [ |
|
{ |
|
"first": "Ina", |
|
"middle": [], |
|
"last": "R\u00f6siger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Schulz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of LaTeCH-CLfL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "129--138", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ina R\u00f6siger, Sarah Schulz, and Nils Reiter. 2018. Towards coreference for literary text: Analyzing domain-specific phenomena. In Proceedings of LaTeCH-CLfL, pages 129-138.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Gender bias in coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Leonard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8--14", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of ACL, pages 8-14.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Interacting semantic layers of annotation in SoNaR, a reference corpus of contemporary written Dutch", |
|
"authors": [ |
|
{ |
|
"first": "Ineke", |
|
"middle": [], |
|
"last": "Schuurman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00e9ronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paola", |
|
"middle": [], |
|
"last": "Monachesi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2471--2477", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ineke Schuurman, V\u00e9ronique Hoste, and Paola Monach- esi. 2010. Interacting semantic layers of annotation in SoNaR, a reference corpus of contemporary writ- ten Dutch. In Proceedings of LREC, pages 2471- 2477.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "On the sample complexity of end-to-end training vs. semantic abstraction training", |
|
"authors": [ |
|
{ |
|
"first": "Shai", |
|
"middle": [], |
|
"last": "Shalev-Shwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amnon", |
|
"middle": [], |
|
"last": "Shashua", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1604.06915" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shai Shalev-Shwartz and Amnon Shashua. 2016. On the sample complexity of end-to-end training vs. semantic abstraction training. arXiv preprint arXiv:1604.06915.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A machine learning approach to coreference resolution of noun phrases", |
|
"authors": [], |
|
"year": 2001, |
|
"venue": "Computational Linguistics", |
|
"volume": "27", |
|
"issue": "4", |
|
"pages": "521--544", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/089120101753342653" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wee Meng Soon, Hwee Tou Ng, and Daniel Chung Yong Lim. 2001. A machine learning approach to coreference resolution of noun phrases. Computational Linguistics, 27(4):521-544.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Learning to Ignore: Long Document Coreference with Bounded Memory Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Shubham", |
|
"middle": [], |
|
"last": "Toshniwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Wiseman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Allyson", |
|
"middle": [], |
|
"last": "Ettinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Livescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8519--8526", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.685" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shubham Toshniwal, Sam Wiseman, Allyson Ettinger, Karen Livescu, and Kevin Gimpel. 2020. Learn- ing to Ignore: Long Document Coreference with Bounded Memory Neural Networks. In Proceedings of EMNLP, pages 8519-8526.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A Dutch coreference resolution system with an evaluation on literary fiction", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Van Cranenburgh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Computational Linguistics in the Netherlands Journal", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "27--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas van Cranenburgh. 2019. A Dutch coreference resolution system with an evaluation on literary fic- tion. Computational Linguistics in the Netherlands Journal, 9:27-54.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Mind the GAP: A balanced corpus of gendered ambiguous pronouns", |
|
"authors": [ |
|
{ |
|
"first": "Kellie", |
|
"middle": [], |
|
"last": "Webster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Axelrod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "605--617", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00240" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kellie Webster, Marta Recasens, Vera Axelrod, and Ja- son Baldridge. 2018. Mind the GAP: A balanced corpus of gendered ambiguous pronouns. Transac- tions of the Association for Computational Linguis- tics, 6:605-617.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "CorefQA: Coreference resolution as querybased span prediction", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arianna", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6953--6963", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.622" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Wu, Fei Wang, Arianna Yuan, Fei Wu, and Jiwei Li. 2020. CorefQA: Coreference resolution as query- based span prediction. In Proceedings of ACL, pages 6953-6963.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Incorporating context and external knowledge for pronoun coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Hongming", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangqiu", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "872--881", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1093" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hongming Zhang, Yan Song, and Yangqiu Song. 2019. Incorporating context and external knowledge for pronoun coreference resolution. In Proceedings of NAACL, pages 872-881.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "OntoGUM: Evaluating contextualized SOTA coreference resolution on 12 more genres", |
|
"authors": [ |
|
{ |
|
"first": "Yilun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zeldes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "461--467", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-short.59" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yilun Zhu, Sameer Pradhan, and Amir Zeldes. 2021. OntoGUM: Evaluating contextualized SOTA coref- erence resolution on 12 more genres. In Proceedings of ACL, pages 461-467.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "An overview of the hybrid system." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "A training curve showing the effect of varying the amount of training data." |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"text": "De chirurg] 1 kon [de pati\u00ebnt] 2 niet behandelen. [Hij] 2 was [[haar] 1 zoon] 2 ! [The surgeon] 1 couldn't treat [the patient] 2 .", |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"html": null, |
|
"text": "Table 1: Mention span classifier results on dev set (N=6434 mentions).", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>recall prec.</td><td>F1</td></tr><tr><td>Rule-based</td><td colspan=\"2\">88.3 84.9 86.6</td></tr><tr><td>Classifier</td><td colspan=\"2\">88.8 87.2 88.0</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>: F1-scores for mention attributes on develop-</td></tr><tr><td>ment set with rule-based baseline using word lists (RB)</td></tr><tr><td>and the neural classifier (NEU).</td></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"html": null, |
|
"text": "Coreference results on the RiddleCoref dataset (predicted mentions, including singletons).", |
|
"type_str": "table", |
|
"content": "<table><tr><td>System</td><td>set</td><td>Mentions</td><td/><td/><td>LEA</td><td/><td>CoNLL Pron</td></tr><tr><td/><td>R</td><td>P</td><td>F1</td><td>R</td><td>P</td><td>F1</td><td>Acc</td></tr><tr><td>dutchcoref</td><td>test 100</td><td>100</td><td>100</td><td colspan=\"3\">59.55 69.70 64.22</td><td>77.75 69.59</td></tr><tr><td>dutchcoref+attr</td><td>test 100</td><td>100</td><td>100</td><td colspan=\"3\">61.16 70.69 65.58</td><td>78.88 74.07</td></tr><tr><td>dutchcoref+attr,pron</td><td>test 100</td><td>100</td><td>100</td><td colspan=\"3\">61.87 67.08 64.37</td><td>78.44 71.44</td></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"html": null, |
|
"text": "Coreference results on the RiddleCoref dataset (gold mentions, including singletons).", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"3\">Fiction Romance Suspense</td><td>Other</td></tr><tr><td>tokens</td><td>9664</td><td>4046</td><td colspan=\"2\">4533 34,354</td></tr><tr><td>avg sent len</td><td>15.5</td><td>14.9</td><td>17.5</td><td>16.1</td></tr><tr><td>ment. / ent.</td><td>3.0</td><td>2.6</td><td>2.2</td><td>3.5</td></tr><tr><td>ent. / tok.</td><td>0.08</td><td>0.10</td><td>0.11</td><td>0.07</td></tr><tr><td>% pronoun</td><td>35.1</td><td>42.6</td><td>37.9</td><td>42.0</td></tr><tr><td>% nominals</td><td>54.8</td><td>45.1</td><td>50.2</td><td>41.2</td></tr><tr><td>% names</td><td>10.2</td><td>12.2</td><td>11.9</td><td>16.8</td></tr><tr><td>CoNLL score</td><td>67.76</td><td>71.30</td><td>72.45</td><td>70.22</td></tr></table>" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Evaluation of the pronoun resolution module</td></tr><tr><td>trained on SoNaR-1, evaluated on different genres with</td></tr><tr><td>two documents per genre.</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |