|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:34:37.417874Z" |
|
}, |
|
"title": "Structured Prediction for Joint Class Cardinality and Entity Property Inference in Model-Complete Text Comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Ter", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Semantic Computing Group", |
|
"institution": "Bielefeld University", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Semantic Computing Group", |
|
"institution": "Bielefeld University", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Model-complete text comprehension aims at interpreting a natural language text with respect to a semantic domain model describing the classes and their properties relevant for the domain in question. Solving this task can be approached as a structured prediction problem, consisting in inferring the most probable instance of the semantic model given the text. In this work, we focus on the challenging subproblem of cardinality prediction that consists in predicting the number of distinct individuals of each class in the semantic model. We show that cardinality prediction can successfully be approached by modeling the overall task as a joint inference problem, predicting the number of individuals of certain classes while at the same time extracting their properties. We approach this task with probabilistic graphical models computing the maximum-aposteriori instance of the semantic model. Our main contribution lies on the empirical investigation and analysis of different approximative inference strategies based on Gibbs sampling. We present and evaluate our models on the task of extracting key parameters from scientific full text articles describing pre-clinical studies in the domain of spinal cord injury.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Model-complete text comprehension aims at interpreting a natural language text with respect to a semantic domain model describing the classes and their properties relevant for the domain in question. Solving this task can be approached as a structured prediction problem, consisting in inferring the most probable instance of the semantic model given the text. In this work, we focus on the challenging subproblem of cardinality prediction that consists in predicting the number of distinct individuals of each class in the semantic model. We show that cardinality prediction can successfully be approached by modeling the overall task as a joint inference problem, predicting the number of individuals of certain classes while at the same time extracting their properties. We approach this task with probabilistic graphical models computing the maximum-aposteriori instance of the semantic model. Our main contribution lies on the empirical investigation and analysis of different approximative inference strategies based on Gibbs sampling. We present and evaluate our models on the task of extracting key parameters from scientific full text articles describing pre-clinical studies in the domain of spinal cord injury.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "While there has been significant progress on information extraction tasks with a comparably low level of structural complexity such as entity recognition (Goulart et al., 2011; Nadeau and Sekine, 2007) , relation extraction (Zhou et al., 2014; Kumar, 2017) , and co-reference resolution (Soon et al., 2001; Ferracane et al., 2016) , there is not much progress on capturing the comprehensive meaning of a text with respect to a given semantic model in terms of a given vocabulary of classes and properties. We refer to this task as model-complete text comprehension (MCTC) which requires to put all the above mentioned classical NLP-tasks into a larger context. The goal of MCTC is to capture all the information in the text that is expressible with respect to the semantic model, while ignoring those meaning aspects which are not. This can be framed as a structured prediction problem consisting in inferring the most plausible instance of the semantic model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 176, |
|
"text": "(Goulart et al., 2011;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 201, |
|
"text": "Nadeau and Sekine, 2007)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 243, |
|
"text": "(Zhou et al., 2014;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 256, |
|
"text": "Kumar, 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 306, |
|
"text": "(Soon et al., 2001;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 330, |
|
"text": "Ferracane et al., 2016)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One challenging problem in MCTC lies in the prediction of the correct number of individuals for each class, hereinafter referred to as cardinality prediction, that is answering the question(s): \"How many (and which) individuals of a class are mentioned in the text?\". In essence, this can be approached by grouping mentions of known realworld entities into equivalence classes, which has widely been addressed under the heading of coreference resolution (He, 2007; Singh et al., 2013) . However, in many problem domains, we need to identify equivalence classes of entities that are priorily unknown (in terms of not referring to a specific real-world entity). Thus, explicit mentions in text such as naming variations etc. can not be directly mapped to a set of existing entities. To the contrary, such entities are only distinguishable on the basis of their describing properties. Take the case of scientific publications concerning pre-clinical studies containing a variable number of experimental groups each of which is described by an injury model, an animal species, treatments etc. Here, mentions of experimental groups do not refer to existing real-world entities and they need to be inferred/grouped on the basis of their identifying properties that are mentioned in the text. We refer to the prediction of how many distinct individuals 1 of a particular class are (indirectly) mentioned in a text as cardinality prediction and solve it jointly with the prediction of the properties of each individual. We model this joint task as the task of predicting a (logical) model of the text, which involves making choices as to which individuals exist for each class.", |
|
"cite_spans": [ |
|
{ |
|
"start": 454, |
|
"end": 464, |
|
"text": "(He, 2007;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 484, |
|
"text": "Singh et al., 2013)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Towards capturing the dependence of class cardinalities and properties, we propose a joint inference approach that infers equivalence classes of entities in a text while at the same time predicting the properties of each equivalence class. We model this task as a statistical inference problem, relying on a factorized posterior conditional distribution p( y | x) as implemented in CRFs to approximate the true distribution over possible instantiations y \u2208 Y of the semantic model given a text x. Applying maximum-a-posteriori inference, we infer the most likely instance of the model that captures the whole meaning of the text as expressible by the semantic model. This includes the determination of the number of distinct equivalence classes (thus solving cardinality prediction) as well as predicting the properties for each equivalence class. Our approach is evaluated on text comprehension of research articles describing pre-clinical studies in the domain of spinal cord injury. Capturing correct key-parameters of the study protocol can be modeled as an MCTC problem as it requires a comprehensive understanding of the text rather than extracting single binary relations only. In this domain, we focus in particular on the extraction of experimental groups and their properties as described in Section 4.1. The data set 2 and the source code 3 are public available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we answer the following research questions: 1) What is the advantage of jointly predicting the cardinality of classes and their properties over an isolated approach and how much does the prediction of the cardinality profit from the joint modelling?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2) What approximative inference strategies work best on this complex inference problem? We examine i) a vanilla Gibbs-based inference strategy ii) an inference strategy that is seeded with cardinality values based on a preceding clustering step., and iii) a parallel multi-chain inference strategy in which one chain is constructed for each potential cardinality value.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are a number of traditional natural language processing tasks related to model-complete text comprehension. In this section, we briefly discuss each task and provide some pointers to systems addressing the corresponding task, focusing on the bio-medical domain. Entity Recognition and Linking (NER+L) describes the task of finding entity mentions in a text and linking them to unique concepts in some knowledge base. The task originated in the context of information extraction, consisting of identifying persons, company names etc. (Nadeau and Sekine, 2007) but has also received prominent attention in the biomedical field focusing on entities such as genes, diseases, treatments, etc. (Goulart et al., 2011) . NER+L is an important preliminary step in many downstream applications as it identifies core informational units that are needed for more complex analysis levels including relation extraction, slot filling, and MCTC.", |
|
"cite_spans": [ |
|
{ |
|
"start": 539, |
|
"end": 564, |
|
"text": "(Nadeau and Sekine, 2007)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 716, |
|
"text": "diseases, treatments, etc. (Goulart et al., 2011)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Relation Extraction (RE) describes the task of detecting relations between entities mentioned in a text (Giuliano et al., 2007) . While many models rely on a pipeline architecture predicting entities first and then predicting relations, more recent works model both tasks jointly (Luo et al., 2015) . Although there has been notable progress on RE in the last years, the task has been typically restricted to extracting binary relations within single sentence boundaries only (Zhou et al., 2014) . With our work, we strive to go beyond such simplifications towards document-level text interpretation with respect to a more complex model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 127, |
|
"text": "(Giuliano et al., 2007)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 298, |
|
"text": "(Luo et al., 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 495, |
|
"text": "(Zhou et al., 2014)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Co-reference resolution (CRR) describes originally the task of finding nouns and pronouns that refer to the same underlying entity (Soon et al., 2001) . When applying CRR to the medical field, the task shifts towards the resolution of mentions of diseases, tests, compounds, groups, treatments, etc. (He, 2007) . Cardinality prediction in isolation can be modeled as a CRR problem, where the number of distinct non co-referring entities need to be found. With regard to the goal of comprehensive text understanding, classical co-reference resolution is clearly not enough, as also the properties of each entity need to be extracted. While Singh et al. (Singh et al., 2013 ) have attempted to model the tasks of entity recognition, relation extraction and co-reference resolution jointly, in their approach the interaction between relation extraction and co-reference resolution is not modelled directly, only via entity tags. In our approach we model the joint interaction between inducing equivalence classes (resolving co-references) while extracting the properties of entities/individuals as a basis to inform the decision about whether two individuals are the same (thus co-refer) given their properties. Durret et al. (Durrett et al., 2013) propose a global inference entity-level modeling for classical co-reference resolution based on a rich factor graph. In the unrolled factor graph, each factor refers to one entity property defined on a semantic or syntactic linguistic basis. In contrast to this work where properties of an individual/entity are pre-defined by the semantic model. Thus, our focus lies in their joint exploration while learning their interplay during inference in order to decide whether the properties belong to the same individual or not. Haghighi et al. (Haghighi and Klein, 2010) propose an unsupervised generative model incorporating several linguistic properties of the entity and its mention. In contrast, our work does not rely on entities that are explicitly mentioned in text. Instead, our model follows the schema of a semantic model to reason about the existence of individuals that can be inferred from the text and groups these individuals into groups by way of inferring the properties of these individuals.", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 150, |
|
"text": "(Soon et al., 2001)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 310, |
|
"text": "(He, 2007)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 652, |
|
"end": 671, |
|
"text": "(Singh et al., 2013", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1223, |
|
"end": 1245, |
|
"text": "(Durrett et al., 2013)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1785, |
|
"end": 1811, |
|
"text": "(Haghighi and Klein, 2010)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The task of slot-filling (SF) was first introduced in the Message Understanding Conference (Grishman and Sundheim, 1996) . It is concerned with predicting an entity-centric structure having a set of relations to other entities as it can be found e.g. in ontology-based information extraction (Sanchez-Cisneros and Aparicio Gali, 2013; Buitelaar et al., 2006) or extracting info-boxes from Wikipedia articles (Lange et al., 2010) . Contrary to MCTC, classical slot-filling requires the prediction of a single structure per document only, which heavily reduces relational complexity and does not include nested individuals. There are many approaches to SF ranging from relying on distant supervision as described by Surdeanu et al. (Surdeanu et al., 2010) to, more recently, neural approaches as described by Zhang et al. (Zhang et al., 2017) . Finally, SF can be seen as an upstream process for (cold-start) knowledge base population as described by ter Horst et al. .", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 120, |
|
"text": "(Grishman and Sundheim, 1996)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 334, |
|
"text": "(Sanchez-Cisneros and Aparicio Gali, 2013;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 358, |
|
"text": "Buitelaar et al., 2006)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 408, |
|
"end": 428, |
|
"text": "(Lange et al., 2010)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 714, |
|
"end": 753, |
|
"text": "Surdeanu et al. (Surdeanu et al., 2010)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 807, |
|
"end": 840, |
|
"text": "Zhang et al. (Zhang et al., 2017)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our work is highly related to information extraction systems in the (bio-) medical field. When it comes e.g. to the prediction of key parameters of clinical studies, most work focuses on the extraction of PICO-concepts: Patient/Problem (P), Intervention (I), Comparison (C) and Outcome (O). Summerscales et al. (Summerscales et al., 2009) have applied conditional random fields to extract key parameters from abstracts of clinical studies including treatments, experimental groups, and outcomes. Contrary to our approach, the task is defined as an NER+L problem, not aiming at capturing the semantic relations and concepts. Trenta et al. (Trenta et al., 2015) have proposed to rely on a maximum entropy classifier jointly extracting fine grained PICO elements from abstracts. Brujin et al. (De Bruijn et al., 2008) combined an SVM-based text classifier with regular expressions to extract PICO elements. Further, Ferracane et al. (Ferracane et al., 2016) aim to leverage co-reference resolution to identify experimental groups (patients) from medical abstracts. However, none of these works aims at deeper extraction of arms/experimental groups and their properties. In general, most approaches in the literature focus on sentence extraction and classification only (Mayer et al., 2018; Zhao et al., 2012; rather than on predicting a semantic structure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 338, |
|
"text": "(Summerscales et al., 2009)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 638, |
|
"end": 659, |
|
"text": "(Trenta et al., 2015)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 814, |
|
"text": "(De Bruijn et al., 2008)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 930, |
|
"end": 954, |
|
"text": "(Ferracane et al., 2016)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1266, |
|
"end": 1286, |
|
"text": "(Mayer et al., 2018;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1287, |
|
"end": 1305, |
|
"text": "Zhao et al., 2012;", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Structured prediction describes a variety of tasks with the goal of predicting a pre-defined target structure that is extracted from an unstructured input text (Smith, 2011) . We formulate the MCTC problem as a structured prediction task, where the structure to be predicted is an instance of the semantic model capturing the meaning of a text. This involves the task of predicting the number of individuals of each class (cardinality prediction) as well as predicting the values of the key properties of each individual. Our proposed method relies on probabilistic graphical models i.e. conditional random fields (CRFs; (Lafferty et al., 2001; Sutton et al., 2012) ) as their application is well established in many structured prediction tasks in the context of NLP.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 173, |
|
"text": "(Smith, 2011)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 644, |
|
"text": "(Lafferty et al., 2001;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 645, |
|
"end": 665, |
|
"text": "Sutton et al., 2012)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Encoding Semantic Models: An instance of the semantic model is encoded as a nested vector y containing as many elements as there are classes and properties in the model. Thus, given a set of classes {C 1 , . . . , C n } and a set of properties P = Figure 1 : Schematized factor graph unrolled over the previously shown example. We introduce unary property factors connected to a single property of a single individual and pairwise property factors, connected to two properties of one or two individuals. Both factors are additionally connected to the cardinality variables jointly modelling the properties and cardinalities. For clarity, we omit the observed variables in this example.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 256, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "{P 1 , . . . , P m }, y can be written as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "{ v C 1 , ..., v Cn } where each v C i has the form [|C i |, I i 1 , . . . , I i m ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ". |C i | represents the cardinality of class C i , i.e. the number of individuals of class C i mentioned in the text. I i j \u2286 P is a vector describing an individual of class C i in terms of its properties.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Example: Consider a semantic model consisting of two classes C 1 and C 2 where individuals of class C 1 have properties hasA, hasB, hasC, and individuals of class C 2 have properties hasD, hasE. One specific instance of the semantic model would be represented as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "[[2, [a 1 , b 1 , c 1 ], [a 1 , b 1 , c 2 ]], [1, [d 1 , e 2 ]]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "]. The first component of the first tuple shows that there are two individuals of class C 1 . The first individual has the property values a 1 , b 1 , c 1 for properties hasA, hasB, hasC, respectively. The second individual of class C 1 has property values a 1 , b 1 , c 2 for the above mentioned properties. The second tuple shows that there is one individual of class C 2 which has property values d 1 , e 2 for properties hasD, hasE, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
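To make the encoding above concrete, the following is a minimal, hypothetical Python sketch (not the authors' implementation) of the nested-vector representation of a semantic-model instance, using plain lists and dicts; class names, property names, and values mirror the running example.

```python
# Hypothetical sketch of the nested-vector encoding y = {v_C1, ..., v_Cn},
# where each v_Ci = [|C_i|, I_1, ..., I_{|C_i|}] and each I_j maps properties to values.

def make_instance():
    # Two individuals of class C1, one individual of class C2 (the running example).
    v_C1 = [2,
            {"hasA": "a1", "hasB": "b1", "hasC": "c1"},   # first individual of C1
            {"hasA": "a1", "hasB": "b1", "hasC": "c2"}]   # second individual of C1
    v_C2 = [1,
            {"hasD": "d1", "hasE": "e2"}]                 # single individual of C2
    return {"C1": v_C1, "C2": v_C2}

def cardinality(y, cls):
    """Return |C| for a class, i.e. the number of distinct individuals."""
    return y[cls][0]

if __name__ == "__main__":
    y = make_instance()
    print(cardinality(y, "C1"))  # -> 2
    print(cardinality(y, "C2"))  # -> 1
```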
|
{ |
|
"text": "Let Y be the set of all possible (nested) vectors over a given vocabulary of classes and properties as exemplified above. Intuitively, this is the set of all possible instantiations of the semantic model. With x being the set of observed input variables corresponding to the list of tokens of the input text, the conditional probability of a specific instance of the semantic model y \u2208 Y is p( y| x; \u03b8), with \u03b8 being a learned model parameter vector. The best value assignment to the set of target variables, denoted as\u02c6 y, is found by maximum a-posteriori (MAP) inference as shown in Equation 1:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CRF-based Modelling", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y = argmax y\u2208Y p( y| x; \u03b8)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "CRF-based Modelling", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As inference in high dimensional vector spaces is often intractable, conditional random fields decompose the joint probability into individual factors. The set of factors and their operating scope is defined by a factor graph (Kschischang et al., 2001; Koller and Friedman, 2009) . A factor graph is a bipartite undirected graph G = (V, F ) consisting of a set of factors F and a set of variables V defined as the union of the observed input and the target output variables V = y \u222a x. A factor \u03a8 \u2208 F is a non-negative real-valued exponential function \u03a8 : V \u2192 R \u22650 that computes a scalar score based on a subset \u03c9 \u2286 V of random variables defining its operating scope \u03a8(\u03c9) = exp( f (\u03c9), \u03b8 \u03a8 ), with f (\u2022) representing a feature vector based on a set of indicator functions, and \u03b8 \u03a8 referring to the set of model weights that are shared between factors of the same type. In our approach, it is crucial to capture dependencies between multiple target variables, in particular between the variables representing the cardinalities of classes and variables representing the individuals' properties. For this reason, we introduce factors that model the interaction between all pairs of property variables while having access to the cardinalities. We schematize our factor graph in Figure 1 , unrolled over the previously given example. Let C denote the vector of the cardinalities of all classes and |C i | \u2208 C the cardinality of class C i . The decomposition of the conditional probability p( y | x; \u03b8) can be written as shown in Equation 2:", |
|
"cite_spans": [ |
|
{ |
|
"start": 226, |
|
"end": 252, |
|
"text": "(Kschischang et al., 2001;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 279, |
|
"text": "Koller and Friedman, 2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1273, |
|
"end": 1281, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "CRF-based Modelling", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "1 Z y i \u2208 y \u03a8 ( C, y i , x) y j \u2208 y\\{y i } \u03a8 ( C, y i , y j , x) (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CRF-based Modelling", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where Z denotes the partition function and \u03a8 (\u2022), \u03a8 (\u2022) denote factors defined for single and pairs of output variables while having access to the cardinalities C.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CRF-based Modelling", |
|
"sec_num": "3.1" |
|
}, |
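As an illustration of the factorized score in Equation 2, the sketch below (a simplification under our own assumptions, not the released system) computes an unnormalized log-linear score of a state from unary and pairwise factors; each factor contributes a dot product between a feature vector and shared weights, and both factor types also see the cardinality vector. The feature-function signatures are hypothetical.

```python
from typing import Callable, Dict

# Hypothetical feature functions: (cardinalities, variable value(s), document) -> sparse features.
UnaryFeat = Callable[[Dict[str, int], str, str], Dict[str, float]]
PairFeat = Callable[[Dict[str, int], str, str, str], Dict[str, float]]

def factor_score(features: Dict[str, float], theta: Dict[str, float]) -> float:
    # Psi(omega) = exp(<f(omega), theta>); we return the log-score <f, theta>.
    return sum(v * theta.get(k, 0.0) for k, v in features.items())

def unnormalized_log_prob(y_vars: Dict[str, str], cards: Dict[str, int], doc: str,
                          unary_f: UnaryFeat, pair_f: PairFeat,
                          theta: Dict[str, float]) -> float:
    """Sum of unary and pairwise log-factor scores; the partition function Z is never computed."""
    score = 0.0
    names = list(y_vars)
    for i, yi in enumerate(names):
        score += factor_score(unary_f(cards, y_vars[yi], doc), theta)
        for yj in names[i + 1:]:
            score += factor_score(pair_f(cards, y_vars[yi], y_vars[yj], doc), theta)
    return score
```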
|
{ |
|
"text": "The unrolling of factors over the input is performed using imperatively defined factor graphs as proposed by McCallum et al. (McCallum et al., 2009) . For approximative inference of the posterior distribution, we rely on the state-based Markov Chain Monte Carlo sampling paradigm. Proposal states are computed and sampled via Gibbs sampling (Casella and George, 1992) . While training, the model parameters \u03b8 are updated with SampleRank (Wick et al., 2009) that is computing parameter update gradients based on an objective comparison of two states, usually between the current state and the selected successor state (cf. next sections for proposed variations). In our approach the objective is to maximize the F 1 score to the ground truth.", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 148, |
|
"text": "McCallum et al. (McCallum et al., 2009)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 367, |
|
"text": "(Casella and George, 1992)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 456, |
|
"text": "(Wick et al., 2009)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CRF-based Modelling", |
|
"sec_num": "3.1" |
|
}, |
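A minimal sketch of a SampleRank-style update under the stated objective (maximizing F_1 against the ground truth); the feature representation and scoring are placeholders, and the actual system's update rule may differ in details such as margins and learning-rate schedules.

```python
from typing import Dict

def samplerank_update(theta: Dict[str, float],
                      feats_current: Dict[str, float],
                      feats_proposal: Dict[str, float],
                      f1_current: float,
                      f1_proposal: float,
                      lr: float = 0.1) -> None:
    """Move weights towards the state preferred by the objective (F1 vs. ground truth)
    whenever the model ranking disagrees with the objective ranking."""
    score = lambda f: sum(v * theta.get(k, 0.0) for k, v in f.items())
    model_prefers_proposal = score(feats_proposal) > score(feats_current)
    objective_prefers_proposal = f1_proposal > f1_current
    if model_prefers_proposal == objective_prefers_proposal:
        return  # rankings agree, no update
    better, worse = ((feats_proposal, feats_current) if objective_prefers_proposal
                     else (feats_current, feats_proposal))
    for k in set(better) | set(worse):
        theta[k] = theta.get(k, 0.0) + lr * (better.get(k, 0.0) - worse.get(k, 0.0))
```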
|
{ |
|
"text": "In the following, we propose our inference strategies to MCTC with a focus towards cardinality prediction. In state-based inference, a state s t is defined as one specific variable assignment to the target structure y at a specific time point t. While inference proceeds, in each step a set of proposal states S t+1 is computed based on a list of predefined atomic change rules that are applied to the current state, e.g. changing cardinalities of classes or the properties of individuals. The successor state s t+1 \u2208 S t+1 is sampled from the generated set of proposal states.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State-based Inference Strategies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Vanilla Inference: The vanilla inference is based on traditional Gibbs sampling. The inference procedure is initialized with one empty state s 0 that is y = \u2205 (no values are assigned) and iteratively updated with atomic change rules. Modifying the cardinality for a class C i is defined as either deleting an existing individual of index j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State-based Inference Strategies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "( y \u2190 y \\ I i j ; |C i | \u2190 (|C i | \u2212 1)) or adding a new individual with leading index |C i | ( y \u2190 y \u222a I i |C i | ; |C i | \u2190 (|C i | + 1)).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State-based Inference Strategies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "On the level of individuals, an atomic change is defined as deleting, adding, or changing a property value. The inference procedure terminates if the model parameter update converges. The final state represents the most likely instance of the semantic model. Cardinality Seeded + Inference: In the seeded + inference (c.f. Figure 2) , the first state s 0 is initialized with an a priori predicted cardinality value \u03bb C i for each class C i , which is re-sampled as inference proceeds. For this, the system relies on the same atomic change and termination rules as defined for the vanilla inference.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 332, |
|
"text": "Figure 2)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "State-based Inference Strategies", |
|
"sec_num": "3.2" |
|
}, |
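To illustrate the vanilla strategy's proposal step, here is a hypothetical sketch of the atomic change rules (add or delete an individual, i.e. change the cardinality, and set or delete a property value); it operates on the dict-based encoding sketched earlier and enumerates successor candidates for Gibbs-style sampling.

```python
import copy

def propose_states(state, schema, candidate_values):
    """Enumerate proposal states reachable by one atomic change.
    state: {class: [cardinality, ind_1, ..., ind_n]} as in the earlier sketch.
    schema: {class: [property, ...]}; candidate_values: {property: [value, ...]}."""
    proposals = []
    for cls, props in schema.items():
        card = state[cls][0]
        # cardinality changes: add an empty individual / delete an existing one
        s = copy.deepcopy(state); s[cls][0] = card + 1; s[cls].append({}); proposals.append(s)
        for j in range(1, card + 1):
            s = copy.deepcopy(state); s[cls][0] = card - 1; del s[cls][j]; proposals.append(s)
        # property changes: assign or remove a value of an individual
        for j in range(1, card + 1):
            for p in props:
                for v in candidate_values.get(p, []):
                    s = copy.deepcopy(state); s[cls][j][p] = v; proposals.append(s)
                if p in state[cls][j]:
                    s = copy.deepcopy(state); del s[cls][j][p]; proposals.append(s)
    return proposals
```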
|
{ |
|
"text": "Parallel Multi Chain Inference: The parallel multi chain inference procedure (c.f. Figure 3) is initialized with n independent Markov chains S 0 = [s 0 1 , s 0 2 , . . . , s 0 n ] that are explored in parallel but independently from each other. Each state s 0 i \u2208 S 0 is initialized with a fixed number of individuals for each class type ranging between a Figure 2 : Schematized seeded + inference. The input is the seed parameter \u03bb which is used to initialize the cardinality of the first state. Within the proposal states, \u03bb can be altered. In each time step the model is updated based on the current state and successor state.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 92, |
|
"text": "Figure 3)", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 364, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "State-based Inference Strategies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "pre-defined minimum \u03b1 C i and maximum \u03b2 C i . Contrary to the previous inference strategies, the cardinalities in each chain are not sampled over but remain fixed. Only the property values of the individuals are sampled. The parallel sampling is independent in the sense that for each chain the computation of the set of proposal states and the selection of the successor states is independent of the other chains. The model parameters \u03b8 however are shared throughout all chains and are thus updated n times every time step; once for each pair of current-successor state. This inference procedure terminates if all chains converge. The final output is selected based on the highest model probability among the final states of all chains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State-based Inference Strategies", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Parallel Multi Chain Inference + : The parallel multi chain inference with cross-chain model updates strategy builds on the previous inference strategy in that it includes parallel inference chains with fixed cardinality but integrates cross-chain model update operations after each time step (bold triangle in Figure 3 ). That is, in addition to the n model updates, a set of state-pairs is computed by pairwise combining the selected successor states of the chains for cross-over model updates. This generates n 2 +n 2 model parameter updates in each time step. The motivation for this cross-chain model updates is to force the model to learn to prefer the correct cardinality values.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 319, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "State-based Inference Strategies", |
|
"sec_num": "3.2" |
|
}, |
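The following sketch (hypothetical and simplified, with caller-supplied init_chain/step/update/score functions) shows the control flow of the parallel multi-chain variants: one chain per fixed cardinality in [alpha, beta], per-chain updates on current/successor pairs, optional cross-chain updates on pairs of successor states (together yielding (n^2+n)/2 updates per step), and final selection by model score.

```python
from itertools import combinations

def multi_chain_inference(alpha, beta, init_chain, step, update, score,
                          max_steps=100, cross_chain=False):
    """init_chain(k): initial state with fixed cardinality k; step(s): sampled successor;
    update(a, b): SampleRank-style parameter update; score(s): model score of a state."""
    chains = [init_chain(k) for k in range(alpha, beta + 1)]
    for _ in range(max_steps):
        successors = [step(s) for s in chains]
        for cur, nxt in zip(chains, successors):        # n per-chain updates
            update(cur, nxt)
        if cross_chain:                                  # extra cross-over updates
            for a, b in combinations(successors, 2):     # n*(n-1)/2 pairs
                update(a, b)
        chains = successors
    return max(chains, key=score)                        # highest model probability wins
```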
|
{ |
|
"text": "Factors are defined in terms of indicator functions that measure the compatibility of variable assignments to the output structure y given the input document x. In the following, we explain four types of feature groups that we consider in our model. The proposed features are intuitively designed to capture document-level semantics and finally selected empirically based on an evaluation of a subset of the training data. For each value between the input \u03b1 and \u03b2 a Markov chain is instantiated. In each time step the model is updated between the current and successor states for each chain. In the advanced version, additional model update operations based on the successor states are added (bold triangle). Document-level: Document-level features measure the compatibility of property assignments of individuals based on the textual content of the document represented as 3-grams. For this, triples are considered for plausibility, containing the property type, the entity type of the property value, and its textual representation as 3-grams. We further measure the compatibility of pairwise assignments of property values considering their sentential distance, assuming that values within the same property (in case of multi value properties) or individual (throughout multiple properties) are more likely to appear closer together rather than being spread across the document. Document-structure: Document structure features rely on a heuristic segmentation of the document into the standard sections of a scientific article: abstract, introduction, method, results, discussion, references, and unknown. We compute features that capture 3-grams mentioned in specific sections of the article as indicators for the assignment of certain values to properties. By this, we can model that certain content is expected in certain sections and should override inconsistent information appearing in other sections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Cardinality: Aiming at cardinality prediction, we measure the compatibility of cardinality values in dependence of other random variables in y. For this, we make the choice of a cardinality dependent on n-grams appearing in the surface forms of property values.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In addition, we also consider features implementing a prior for the cardinalities of classes as well as for the number of different values for multi-value properties. By this, the model is able to learn a class/property-specific distribution of cardinality values. For example, assuming that the cardinality of a class\u0108 has a very high a priori likelihood for a specific value \u03bb\u0108 throughout the training data, this puts pressure on the model during inference to prefer model instances where there are \u03bb\u0108 individuals for the respective class, unless other features provide strong evidence for the contrary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Within-and Across-Individual Coherence: Sometimes values of properties are shared across individuals within the same class. Thus, we measure the compatibility of value assignments across properties within one individual, but also how plausible it is that a certain value is shared across individuals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "3.3" |
|
}, |
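As a concrete, hypothetical illustration of the indicator-style features described above, the sketch below emits sparse document-level 3-gram compatibility features for a single property assignment (optionally conditioned on the document section) and a simple class-specific cardinality-prior feature; all names and feature keys are invented for exposition.

```python
def char_3grams(text):
    return {text[i:i + 3] for i in range(len(text) - 2)}

def unary_property_features(prop_type, entity_type, value_surface, section=None):
    """Sparse binary features pairing (property type, entity type) with surface 3-grams,
    optionally conditioned on the document section the value was found in."""
    feats = {}
    for g in char_3grams(value_surface.lower()):
        feats[f"{prop_type}|{entity_type}|3g={g}"] = 1.0
        if section is not None:
            feats[f"{section}|{prop_type}|3g={g}"] = 1.0
    return feats

def cardinality_prior_feature(cls_name, cardinality):
    # Indicator for class-specific cardinality values; the weight learned for this key
    # acts as a prior preferring cardinalities frequently seen in training.
    return {f"card|{cls_name}={cardinality}": 1.0}
```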
|
{ |
|
"text": "Model-complete text comprehension aims at the automatic instantiation of a semantic model based on information extracted from a natural language text. Such an instance contains information about individuals of equivalence classes, their cardinality and their properties. Thus the overall task of MCTC can be evaluated towards i) the correct prediction of the number of individuals, and ii) the prediction of properties for each individual. In the following, we describe our use case application, the experimental procedure and results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We apply our approach to full text articles describing pre-clinical studies in the domain of spinal cord injury. Our semantic model is an excerpt of the Spinal Cord Injury Ontology (SCIO) (Brazda et al., 2017) centered on the key concept of an experimental group. An experimental group represent an animal model to which a certain injury and treatment is applied and is described by four key properties: i) hasSpecies specifying the species that the animal model belongs to, ii) hasInjury specifying the experimentally inflicted injury, ii) hasTreatment is the list of treatments that were applied, and iv) has-Name is a list of naming variations for that animal group that are used throughout the document. Note that, in accordance with domain experts, only the first three properties are considered to be relevant to describe the experimental group semantically and thus are evaluated. However, the property has-Name can be seen as an auxiliary property that is not necessary to understand the study but provides useful information, e.g. to detect co-references.", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 209, |
|
"text": "(Brazda et al., 2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Model and Data Set", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The data set contains full text articles that have been annotated by three domain experts using the SANTO framework . Annotations are available on the full level of relevant concepts of SCIO. Each document can be seen as a data point that is annotated with an instance of the semantic model containing a list of experimental groups and their properties. While annotations for the hasName property are linked to specific textual phrases in the document, all other properties are annotated in a distantly supervised fashion. In a preliminary step, we apply a named entity recognition heuristic based on automatically generated regular expressions to compute a set of documentbased annotations for all classes and property values that exist in the semantic model. The names of groups are additionally extracted with a standard CRF using standard token-level features. The final data set contains 96 data points with an average length of approx. 273 sentences per document and a total number of 345 experimental groups (\u00b5 = 3.3, \u03c3 = 1.3, min = 2, max = 7).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Model and Data Set", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Our proposed inference strategies rely on a prior estimation of the number of individuals for initialization. As described in Section 3.2 the seeded inference strategy requires the seed variable \u03bb. The parallel multi chain inference requires a range of cardinality values 0 \u2264 \u03b1 \u2264 \u03b2. Details about their estimation are briefly described below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Parameter Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Seed Prior Cardinality Estimation \u03bb The cardinality seeded (+) inference procedure requires the estimation of the seed parameter \u03bb for each class determining the number of individuals (experimental groups) the initial state begins with. \u03bb is computed by relying on the k-Means algorithm by clustering group names based on textual features. The cluster quality of k-Means depends on two main parameters. First, the determination of the number of clusters, for which we rely on the residual sum of squares (RSS) algorithm with an empirically determined penalization factor for large number of clusters. Second, we rely on a function measuring the distance between two data points, i.e. between two group names. We compare three distance functions: i) Levenshtein distance with a k-Medoid implementation of k-Means, ii) cosine distance of the averaged sum of pre-trained Pubmed-based word embeddings induced with Word2vec (Mikolov et al., 2013) , and iii) a random forest classifier (Liaw et al., 2002) with a correlation based feature selection (resulting in Smith-Waterman and 3-gram based Jaccard similarity as features).We evaluate the performances based on the F 1 score using a reference clustering as ground truth obtained from our annotated data set. We define a true positive as a group name that is in the correct cluster, a false positive if it is in a wrong cluster, and a false negative if it is missing in its respective cluster. The Levenshtein distance performed with F 1 = 0.41, the Word2Vec-based cosine distance performs slightly better with F 1 = 0.45, while the random forest classifier reaches a value of F 1 = 0.56. With the random forest outperforming both other models, we rely on this distance function in a k-Means clustering for estimating \u03bb.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 62, |
|
"text": "(+)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 919, |
|
"end": 941, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 980, |
|
"end": 999, |
|
"text": "(Liaw et al., 2002)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Parameter Estimation", |
|
"sec_num": "4.2" |
|
}, |
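A hedged sketch of the seed estimation: cluster the extracted group names with a k-Medoids variant under a pluggable distance function and pick the number of clusters by a penalized residual sum of squares. The penalty constant, iteration count, and the simple medoid update below are illustrative assumptions, not the paper's exact settings.

```python
import random

def estimate_lambda(group_names, distance, k_max=7, penalty=0.5, seed=0):
    """Pick lambda = number of clusters minimizing RSS(k) + penalty * k,
    clustering group names with a k-Medoids variant under the given distance."""
    rng = random.Random(seed)

    def kmedoids_rss(k, iters=10):
        medoids = rng.sample(group_names, k)
        for _ in range(iters):
            clusters = {m: [] for m in medoids}
            for name in group_names:
                clusters[min(medoids, key=lambda m: distance(name, m))].append(name)
            # new medoid = member with the smallest summed distance to its cluster
            medoids = [min(c, key=lambda cand: sum(distance(cand, o) for o in c)) if c else m
                       for m, c in clusters.items()]
        return sum(min(distance(name, m) for m in medoids) ** 2 for name in group_names)

    return min(range(1, min(k_max, len(group_names)) + 1),
               key=lambda k: kmedoids_rss(k) + penalty * k)
```

With a string distance such as Levenshtein (or the random-forest-derived distance) plugged in for `distance`, the returned number of clusters serves as the seed \u03bb.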
|
{ |
|
"text": "Parallel Multi Chain (+) Parameters \u03b1 and \u03b2 The parallel multi chain (+) inference strategies require the estimation of a minimum (\u03b1) and a maximum (\u03b2) number of individuals assuming that the correct cardinality lies between \u03b1 and \u03b2. We estimate both parameters in dependence of the average cardinality of individuals in the training set. With \u00b5 being the average cardinality and \u03c3 its standard deviation, we set \u03b1 = \u00b5 \u2212 \u03c3 and \u03b2 = \u00b5 + \u03c3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 24, |
|
"text": "(+)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 69, |
|
"end": 72, |
|
"text": "(+)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Parameter Estimation", |
|
"sec_num": "4.2" |
|
}, |
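The corresponding parameter estimation is straightforward; a minimal sketch (rounding and clamping to integers is our own assumption):

```python
from statistics import mean, pstdev

def estimate_alpha_beta(train_cardinalities):
    """alpha = mu - sigma, beta = mu + sigma over the training-set cardinalities."""
    mu, sigma = mean(train_cardinalities), pstdev(train_cardinalities)
    alpha = max(1, round(mu - sigma))      # clamp to at least one individual (assumption)
    beta = max(alpha, round(mu + sigma))
    return alpha, beta

# e.g. with the reported statistics (mu = 3.3, sigma = 1.3) this yields roughly (2, 5)
```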
|
{ |
|
"text": "Our experiments follow a randomized cross validation regime as usual for experiments on relatively small data sets. We ran each experiment 10 times with a random split into 80% training data (76 in number) and 20% test data (20 in number). We provide evaluation results in terms of precision, recall, and F 1 macro averaged over all documents in three configurations:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Setting", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Cardinality Prediction (CP): We compare the predicted cardinality p c to the ground truth cardinality g c where tp = min(p c , g c ), f p = max(0, (p c \u2212 g c )), and f n = max(0, (g c \u2212 p c ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Setting", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Property Prediction (PP): We compare the predicted property values to the ground truth property values where a true positive is a correctly assigned property value, a false positive is a wrongly assigned property value and a false negative as a missing property value of an individual.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Setting", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We compute the harmonic mean between the cardinality and property prediction scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combined (Comb):", |
|
"sec_num": null |
|
}, |
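A small sketch of the cardinality-prediction scoring and the combined harmonic mean as defined above (zero-division handling is our own choice):

```python
def cardinality_prf(pred_card, gold_card):
    tp = min(pred_card, gold_card)
    fp = max(0, pred_card - gold_card)
    fn = max(0, gold_card - pred_card)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

def combined_score(cardinality_f1, property_f1):
    # harmonic mean of the cardinality and property prediction scores
    s = cardinality_f1 + property_f1
    return 2 * cardinality_f1 * property_f1 / s if s else 0.0
```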
|
{ |
|
"text": "Our experiments comprise the evaluation of four models each of which is based on one of the de-scribed joint inference strategies predicting cardinality and properties at the same time, as well as a pure cardinality prediction baseline ignoring property prediction. The joint inference models are: RSS: the seeded inference with a fixed cardinality, RSS + : the seeded inference that allows further sampling of the cardinality as described in Section 3.2, PAR: the parallel multi chain inference, and PAR + : the parallel multi chain inference with chain-cross over model updates. As cardinality baseline(s), we provide Co-ref CRF, a CRF based method for clustering group names without a joint prediction of properties, relying on linguistic features only, and RSS, the cardinality as predicted by the RSS based k-Means as reported in the RSS-model. Note that the cardinality in RSS is fixed and does not change during inference so that it can be seen as a baseline for predicting the cardinalities. The experimental results of those models are reported in Table 1 . In Table 2 , we compare the run time and the number of generated states for the four inference methods.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1057, |
|
"end": 1064, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1070, |
|
"end": 1077, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We analyze the results with respect to three different aspects: i) performance of the cardinality prediction, ii) overall performance as measured by the combined harmonic mean, and iii) performance with respect to the run time and complexity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "The performances of the cardinality prediction can be seen in the first row of Table 1 . The CRF-based baseline already yields a very strong F 1 -score of 0.79 which shows that cardinality prediction with linguistic features ignoring property prediction provides already decent results. The k-Means approach with an unsupervised RSS cluster estimation yields a cardinality F 1 -score of 0.64, performing worse than the CRF baseline. When seeding our approximate inference approach with prior cardinality values (RSS + ), the F 1 -score considerably improves by 19 %-points up to 0.83, even outperforming the CRF baseline. The cardinality prediction in PAR performs comparably strong with an F 1 -score of 0.81. This score is further outperformed when integrating the cross-chain model update operation. PAR+ archives performs best in predicting the cardinalities with an F 1 -score of 0.84.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 86, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Cardinality Prediction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Overall Score The performances of the overall prediction can be seen in the second to last rows in Table 1 . With respect to the property prediction, RSS performs best with a score of 0.57, mainly due to the correct detection of TREATMENTS (0.67) and SPECIES (0.62). With a low cardinality performance, the overall score sums up to 0.63 in F 1 . The strong increase in the performance of cardinality prediction in RSS + , compared to the RSS model comes at the cost of an inferior property prediction quality. The combined score for RSS + however shows slightly better results with an F 1score of 0.65. The property prediction in PAR shows similar results to the RSS + for INJURY, a slight decrease for TREATMENTS, and a huge increase (10% points) for SPECIES. The PAR model yields an overall score of 0.66. Activating crosschain model updates (PAR + ), the property prediction shows a performance increase by 8% points for hasTreatment while for both other properties the value is similar to PAR. The PAR + model outperforms all other models in the overall score, but lacks 4%-points for property prediction in comparison to RSS. The results show that property prediction works best when fixing the number of individuals. With PAR + model working best for cardinality prediction, an interesting model combination could be to use the cardinality output of PAR + as initialization to RSS. This however, is left for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 106, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Cardinality Prediction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Run Time Performance The run time as well as the number of states for each inference method is shown in Table 2 . We report statistics on the average time in seconds (s) that is needed to process a document and depict the search space complexity by providing the average number of generated and evaluated states in thousands (k). All experiments ran on an Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz with 16 cores and 120 GB of available RAM. No GPU or further hardware acceleration was used. The table shows that RSS has the lowest complexity in terms of state generation, which is due to the fixed cardinality and in consequence a significantly reduced search space. In RSS + , we notice a huge increase in the number of generated states by a factor of around 7. At the same time, we observe that the run time factor rises only by a factor of 2.2 in training and 2.8 in test. It is noticeable that the number of generated states and the run time at test time decreases from PAR to PAR + which is probably due to a faster model convergence, however training run time increases. Table 1 : Results of the cardinality baseline(s) and of the inference strategies for joint cardinality and property prediction. We provide macro-F 1 , precision, and recall averaged over 10 runs with random 80/20 splits. Table 2 : Run time and complexity statistics of the inference strategies. We provide the average number of evaluated states in thousands (k), averaged training and test time per document in seconds (s).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1077, |
|
"end": 1084, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1298, |
|
"end": 1305, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Cardinality Prediction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have proposed an approach to the task of modelcomplete text comprehension (MCTC) that relies on a learned model of the posterior distribution of instances of a semantic model given a text to infer the most likely instance of a semantic model that captures the meaning of the text best. We have relied on CRFs to model the conditional distribution in a factorized way and empirically investigated the impact of different approximate inferences strategies on our problem. Our experiments on the task of predicting the structure of experimental groups from scientific full text articles describing pre-clinical studies in the field of spinal cord injury show that modeling the MCTC task as a joint inference problem, extracting the cardinality in combination with predicting the properties of the individuals, outperforms a number of reasonable baselines predicting the cardinality alone. In future work, we intend to investigate combinations of our inference strategies, relying on the result state produced by our PAR + inference strategy to seed the RSS inference method to re-sample the property values, expecting to see an overall gain in both cardinality prediction and entity property prediction over both inference strategies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We refer to mentions of entities in a text as entities and to the denotation of such entities in a given model of the text as individuals", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://psink.techfak.uni-bielefeld. de/spnlp-2020/mctc-spnlp2020.zip 3 https://github.com/ag-sc/ SCIOExtraction", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work has been funded by the Federal Ministry of Education and Research (BMBF, Germany) in the PSINK project (project number 031L0028A).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Scio: an ontology to support the formalization of pre-clinical spinal cord injury experiments", |
|
"authors": [ |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Brazda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Ter Horst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Hartung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cord", |
|
"middle": [], |
|
"last": "Wiljes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veronica", |
|
"middle": [], |
|
"last": "Estrada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Klinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Kuchinke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hans", |
|
"middle": [ |
|
"Werner" |
|
], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. of the 3rd Joint Ontology Workshops (JOWO): Ontologies and Data in the Life Sciences", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicole Brazda, Hendrik ter Horst, Matthias Hartung, Cord Wiljes, Veronica Estrada, Roman Klinger, Wolfgang Kuchinke, Hans Werner M\u00fcller, and Philipp Cimiano. 2017. Scio: an ontology to sup- port the formalization of pre-clinical spinal cord in- jury experiments. In Proc. of the 3rd Joint Ontology Workshops (JOWO): Ontologies and Data in the Life Sciences, volume 2050.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Ontology-based information extraction with soba", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Buitelaar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefania", |
|
"middle": [], |
|
"last": "Racioppa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Siegel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of the International Conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Buitelaar, Philipp Cimiano, Stefania Racioppa, and Melanie Siegel. 2006. Ontology-based informa- tion extraction with soba. In Proc. of the Interna- tional Conference on Language Resources and Eval- uation (LREC).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Explaining the gibbs sampler", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Casella", |
|
"suffix": "" |
|
}, |
|
{

"first": "Edward I",

"middle": [],

"last": "George",

"suffix": ""

}
|
], |
|
"year": 1992, |
|
"venue": "The American Statistician", |
|
"volume": "46", |
|
"issue": "3", |
|
"pages": "167--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Casella and Edward I George. 1992. Explain- ing the gibbs sampler. The American Statistician, 46(3):167-174.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Automated information extraction of key trial design elements from clinical trial publications", |
|
"authors": [ |
|
{

"first": "Berry",

"middle": [],

"last": "De Bruijn",

"suffix": ""

},

{

"first": "Simona",

"middle": [],

"last": "Carini",

"suffix": ""

},

{

"first": "Svetlana",

"middle": [],

"last": "Kiritchenko",

"suffix": ""

},

{

"first": "Joel",

"middle": [],

"last": "Martin",

"suffix": ""

},

{

"first": "Ida",

"middle": [],

"last": "Sim",

"suffix": ""

}
|
], |
|
"year": 2008, |
|
"venue": "Proc. of the AMIA Annual Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berry De Bruijn, Simona Carini, Svetlana Kiritchenko, Joel Martin, and Ida Sim. 2008. Automated infor- mation extraction of key trial design elements from clinical trial publications. In Proc. of the AMIA An- nual Symposium, volume 2008, page 141. American Medical Informatics Association.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Decentralized entity-level modeling for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "114--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Greg Durrett, David Hall, and Dan Klein. 2013. Decen- tralized entity-level modeling for coreference resolu- tion. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 114-124.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Leveraging coreference to identify arms in medical abstracts: An experimental study", |
|
"authors": [ |
|
{ |
|
"first": "Elisa", |
|
"middle": [], |
|
"last": "Ferracane", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iain", |
|
"middle": [], |
|
"last": "Marshall", |
|
"suffix": "" |
|
}, |
|
{

"first": "Byron",

"middle": [

"C"

],

"last": "Wallace",

"suffix": ""

},

{

"first": "Katrin",

"middle": [],

"last": "Erk",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proc. of the Seventh International Workshop on Health Text Mining and Information Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "86--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elisa Ferracane, Iain Marshall, Byron C Wallace, and Katrin Erk. 2016. Leveraging coreference to iden- tify arms in medical abstracts: An experimental study. In Proc. of the Seventh International Work- shop on Health Text Mining and Information Analy- sis, pages 86-95.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Relation extraction and the influence of automatic named-entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Claudio", |
|
"middle": [], |
|
"last": "Giuliano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Lavelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenza", |
|
"middle": [], |
|
"last": "Romano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "ACM Transactions on Speech and Language Processing (TSLP)", |
|
"volume": "5", |
|
"issue": "1", |
|
"pages": "1--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claudio Giuliano, Alberto Lavelli, and Lorenza Ro- mano. 2007. Relation extraction and the influence of automatic named-entity recognition. ACM Trans- actions on Speech and Language Processing (TSLP), 5(1):1-26.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A systematic review of named entity recognition in biomedical texts", |
|
"authors": [], |
|
"year": 2011, |
|
"venue": "Journal of the Brazilian Computer Society", |
|
"volume": "17", |
|
"issue": "2", |
|
"pages": "103--116", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodrigo Rafael Villarreal Goulart, Vera L\u00facia Strube de Lima, and Clarissa Castell\u00e3 Xavier. 2011. A systematic review of named entity recognition in biomedical texts. Journal of the Brazilian Computer Society, 17(2):103-116.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Message understanding conference-6: A brief history", |
|
"authors": [ |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
}, |
|
{

"first": "Beth",

"middle": [

"M"

],

"last": "Sundheim",

"suffix": ""

}
|
], |
|
"year": 1996, |
|
"venue": "Proc. of the 16th International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ralph Grishman and Beth M Sundheim. 1996. Mes- sage understanding conference-6: A brief history. In Proc. of the 16th International Conference on Com- putational Linguistics (COLING).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Coreference resolution in a modular, entity-centered model", |
|
"authors": [ |
|
{ |
|
"first": "Aria", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "385--393", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aria Haghighi and Dan Klein. 2010. Coreference res- olution in a modular, entity-centered model. In Hu- man Language Technologies: The 2010 Annual Con- ference of the North American Chapter of the As- sociation for Computational Linguistics, pages 385- 393.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Santo: a web-based annotation tool for ontology-driven slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Hartung", |
|
"suffix": "" |
|
}, |
|
{

"first": "Hendrik",

"middle": [],

"last": "ter Horst",

"suffix": ""

},

{

"first": "Frank",

"middle": [],

"last": "Grimm",

"suffix": ""

},

{

"first": "Tim",

"middle": [],

"last": "Diekmann",

"suffix": ""

},

{

"first": "Roman",

"middle": [],

"last": "Klinger",

"suffix": ""

},

{

"first": "Philipp",

"middle": [],

"last": "Cimiano",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL 2018, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "68--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthias Hartung, Hendrik ter Horst, Frank Grimm, Tim Diekmann, Roman Klinger, and Philipp Cimi- ano. 2018. Santo: a web-based annotation tool for ontology-driven slot filling. In Proceedings of ACL 2018, System Demonstrations, pages 68-73.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Coreference resolution on entities and events for hospital discharge summaries", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tian Ye He", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tian Ye He. 2007. Coreference resolution on entities and events for hospital discharge summaries. Ph.D. thesis, Massachusetts Institute of Technology.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Cold-start knowledge base population using ontology-based information extraction with conditional random fields", |
|
"authors": [ |
|
{

"first": "Hendrik",

"middle": [],

"last": "ter Horst",

"suffix": ""

},
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Hartung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. of the Reasoning Web International Summer School (RW)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "78--109", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hendrik ter Horst, Matthias Hartung, and Philipp Cimi- ano. 2018. Cold-start knowledge base population us- ing ontology-based information extraction with con- ditional random fields. In Proc. of the Reasoning Web International Summer School (RW), pages 78- 109. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Probabilistic Graphical Models. Principles and Techniques", |
|
"authors": [ |
|
{ |
|
"first": "Daphne", |
|
"middle": [], |
|
"last": "Koller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nir", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daphne Koller and Nir Friedman. 2009. Probabilistic Graphical Models. Principles and Techniques. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Factor Graphs and Sum Product Algorithm", |
|
"authors": [ |
|
{

"first": "Frank",

"middle": [

"R"

],

"last": "Kschischang",

"suffix": ""

},

{

"first": "Brendan",

"middle": [

"J"

],

"last": "Frey",

"suffix": ""

},

{

"first": "Hans-Andrea",

"middle": [],

"last": "Loeliger",

"suffix": ""

}
|
], |
|
"year": 2001, |
|
"venue": "IEEE Transactions on Information Theory", |
|
"volume": "47", |
|
"issue": "2", |
|
"pages": "498--519", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frank R. Kschischang, Brendan J. Frey, and Hans- Andrea Loeliger. 2001. Factor Graphs and Sum Product Algorithm. IEEE Transactions on Informa- tion Theory, 47(2):498-519.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A survey of deep learning methods for relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Shantanu", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shantanu Kumar. 2017. A survey of deep learn- ing methods for relation extraction. CoRR, abs/1705.03645.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Conditional Random Fields. Probabilistic Models for Segmenting and Labeling Sequence Data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of the International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, and Fernando Pereira. 2001. Conditional Random Fields. Prob- abilistic Models for Segmenting and Labeling Se- quence Data. In Proc. of the International Confer- ence on Machine Learning (ICML), pages 282-289.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Extracting structured information from wikipedia articles to populate infoboxes", |
|
"authors": [ |
|
{ |
|
"first": "Dustin", |
|
"middle": [], |
|
"last": "Lange", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "B\u00f6hm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of the ACM International Conference on Information and Knowledge Management (CIKM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1661--1664", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dustin Lange, Christoph B\u00f6hm, and Felix Naumann. 2010. Extracting structured information from wikipedia articles to populate infoboxes. In Proc. of the ACM International Conference on Information and Knowledge Management (CIKM), pages 1661- 1664.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Classification and regression by randomforest. R news", |
|
"authors": [ |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Liaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Wiener", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "18--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andy Liaw, Matthew Wiener, et al. 2002. Classifi- cation and regression by randomforest. R news, 2(3):18-22.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Joint entity recognition and disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Gang", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojiang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zaiqing", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "879--888", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gang Luo, Xiaojiang Huang, Chin-Yew Lin, and Za- iqing Nie. 2015. Joint entity recognition and dis- ambiguation. In Proc. of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 879-888.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Evidence type classification in randomized controlled trials", |
|
"authors": [ |
|
{ |
|
"first": "Tobias", |
|
"middle": [], |
|
"last": "Mayer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Cabrio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serena", |
|
"middle": [], |
|
"last": "Villata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. of the 5th Workshop on Argument Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tobias Mayer, Elena Cabrio, and Serena Villata. 2018. Evidence type classification in randomized con- trolled trials. In Proc. of the 5th Workshop on Ar- gument Mining, pages 29-34. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Factorie: Probabilistic programming via imperatively defined factor graphs", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Schultz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1249--1257", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew McCallum, Karl Schultz, and Sameer Singh. 2009. Factorie: Probabilistic programming via im- peratively defined factor graphs. In Proc. of Ad- vances in Neural Information Processing Systems (NIPS), pages 1249-1257.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of the Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Proc. of the Advances in Neural Information Processing Systems (NIPS), pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A survey of named entity recognition and classification", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Nadeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Sekine", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Lingvisticae Investigationes", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "3--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Nadeau and Satoshi Sekine. 2007. A survey of named entity recognition and classification. Lingvis- ticae Investigationes, 30(1):3-26.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "UEM-UC3M: An ontology-based named entity recognition system for biomedical texts", |
|
"authors": [ |
|
{

"first": "Daniel",

"middle": [],

"last": "Sanchez-Cisneros",

"suffix": ""

},
|
{ |
|
"first": "Fernando", |
|
"middle": [ |
|
"Aparicio" |
|
], |
|
"last": "Gali", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of the Seventh International Workshop on Semantic Evaluation (SemEval)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "622--627", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Sanchez-Cisneros and Fernando Aparicio Gali. 2013. UEM-UC3M: An ontology-based named en- tity recognition system for biomedical texts. In Proc. of the Seventh International Workshop on Semantic Evaluation (SemEval), pages 622-627. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Joint inference of entities, relations, and coreference", |
|
"authors": [ |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaping", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of the 2013 workshop on Automated knowledge base construction (AKBC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer Singh, Sebastian Riedel, Brian Martin, Jiaping Zheng, and Andrew McCallum. 2013. Joint infer- ence of entities, relations, and coreference. In Proc. of the 2013 workshop on Automated knowledge base construction (AKBC), pages 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Linguistic Structure Prediction", |
|
"authors": [ |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noah A. Smith. 2011. Linguistic Structure Prediction. Morgan and Claypool.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A machine learning approach to coreference resolution of noun phrases", |
|
"authors": [], |
|
"year": 2001, |
|
"venue": "Computational linguistics", |
|
"volume": "27", |
|
"issue": "4", |
|
"pages": "521--544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wee Meng Soon, Hwee Tou Ng, and Daniel Chung Yong Lim. 2001. A machine learning ap- proach to coreference resolution of noun phrases. Computational linguistics, 27(4):521-544.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Identifying treatments, groups, and outcomes in medical abstracts", |
|
"authors": [ |
|
{ |
|
"first": "Rodney", |
|
"middle": [], |
|
"last": "Summerscales", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shlomo", |
|
"middle": [], |
|
"last": "Argamon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Hupert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the Sixth Midwest Computational Linguistics Colloquium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodney Summerscales, Shlomo Argamon, Jordan Hu- pert, and Alan Schwartz. 2009. Identifying treat- ments, groups, and outcomes in medical abstracts. In Proc. of the Sixth Midwest Computational Lin- guistics Colloquium (MCLC). Indiana University.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "A simple distant supervision approach for the tac-kbp slot filling task", |
|
"authors": [ |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mcclosky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Tibshirani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{

"first": "Angel",

"middle": [

"X"

],

"last": "Chang",

"suffix": ""

},

{

"first": "Valentin",

"middle": [

"I"

],

"last": "Spitkovsky",

"suffix": ""

},

{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihai Surdeanu, David McClosky, Julie Tibshirani, John Bauer, Angel X Chang, Valentin I Spitkovsky, and Christopher D Manning. 2010. A simple distant supervision approach for the tac-kbp slot filling task.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "An introduction to conditional random fields. Foundations and Trends R in Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Sutton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "267--373", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles Sutton, Andrew McCallum, et al. 2012. An introduction to conditional random fields. Founda- tions and Trends R in Machine Learning, 4(4):267- 373.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Extraction of evidence tables from abstracts of randomized clinical trials using a maximum entropy classifier and global constraints", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Trenta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Hunter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Trenta, Anthony Hunter, and Sebastian Riedel. 2015. Extraction of evidence tables from abstracts of randomized clinical trials using a maximum entropy classifier and global constraints. CoRR, abs/1509.05209.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Extracting pico sentences from clinical trial reports using supervised distant supervision", |
|
"authors": [ |
|
{

"first": "Byron",

"middle": [

"C"

],

"last": "Wallace",

"suffix": ""

},

{

"first": "Jo\u00ebl",

"middle": [],

"last": "Kuiper",

"suffix": ""

},

{

"first": "Aakash",

"middle": [],

"last": "Sharma",

"suffix": ""

},

{

"first": "Mingxi",

"middle": [],

"last": "Zhu",

"suffix": ""

},

{

"first": "Iain",

"middle": [

"J"

],

"last": "Marshall",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "17", |
|
"issue": "1", |
|
"pages": "4572--4596", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Byron C Wallace, Jo\u00ebl Kuiper, Aakash Sharma, Mingxi Zhu, and Iain J Marshall. 2016. Extracting pico sen- tences from clinical trial reports using supervised distant supervision. The Journal of Machine Learn- ing Research, 17(1):4572-4596.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "SampleRank. Learning Preferences from Atomic Gradients", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Rohanimanesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Culotta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mc-Callum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the NIPS Workshop on Advances in Ranking", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--5", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Wick, K. Rohanimanesh, A. Culotta, and A. Mc- Callum. 2009. SampleRank. Learning Preferences from Atomic Gradients. In Proc. of the NIPS Work- shop on Advances in Ranking, pages 1-5.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Positionaware attention and supervised data improve slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuhao Zhang, Victor Zhong, Danqi Chen, Gabor An- geli, and Christopher D Manning. 2017. Position- aware attention and supervised data improve slot fill- ing. In Proc. of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 35-45.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Exploiting classification correlations for the extraction of evidence-based practice information", |
|
"authors": [ |
|
{ |
|
"first": "Jin", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Praveen", |
|
"middle": [], |
|
"last": "Bysani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of the AMIA Annual Symposium", |
|
"volume": "2012", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin Zhao, Praveen Bysani, and Min-Yen Kan. 2012. Ex- ploiting classification correlations for the extraction of evidence-based practice information. In Proc. of the AMIA Annual Symposium, volume 2012, page 1070. American Medical Informatics Association.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Biomedical relation extraction: from binary to complex", |
|
"authors": [ |
|
{ |
|
"first": "Deyu", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dayou", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computational and Mathematical Methods in Medicine", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deyu Zhou, Dayou Zhong, and Yulan He. 2014. Biomedical relation extraction: from binary to com- plex. Computational and Mathematical Methods in Medicine, 2014.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Schematized parallel multi chain + inference.", |
|
"uris": null, |
|
"type_str": "figure" |
|
} |
|
} |
|
} |
|
} |