|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T09:51:56.488240Z" |
|
}, |
|
"title": "Biomedical Event Extraction with Hierarchical Knowledge Graphs", |
|
"authors": [ |
|
{ |
|
"first": "Kung-Hsiang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California",

"location": {

"country": "United States"
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California",

"location": {

"country": "United States"
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California",

"location": {

"country": "United States"
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Biomedical event extraction is critical in understanding biomolecular interactions described in scientific corpus. One of the main challenges is to identify nested structured events that are associated with non-indicative trigger words. We propose to incorporate domain knowledge from Unified Medical Language System (UMLS) to a pre-trained language model via a hierarchical graph representation encoded by a proposed Graph Edgeconditioned Attention Networks (GEANet). To better recognize the trigger words, each sentence is first grounded to a sentence graph based on a jointly modeled hierarchical knowledge graph from UMLS. The grounded graphs are then propagated by GEANet, a novel graph neural networks for enhanced capabilities in inferring complex events. On BioNLP 2011 GENIA Event Extraction task, our approach achieved 1.41% F 1 and 3.19% F 1 improvements on all events and complex events, respectively. Ablation studies confirm the importance of GEANet and hierarchical KG.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Biomedical event extraction is critical in understanding biomolecular interactions described in scientific corpus. One of the main challenges is to identify nested structured events that are associated with non-indicative trigger words. We propose to incorporate domain knowledge from Unified Medical Language System (UMLS) to a pre-trained language model via a hierarchical graph representation encoded by a proposed Graph Edgeconditioned Attention Networks (GEANet). To better recognize the trigger words, each sentence is first grounded to a sentence graph based on a jointly modeled hierarchical knowledge graph from UMLS. The grounded graphs are then propagated by GEANet, a novel graph neural networks for enhanced capabilities in inferring complex events. On BioNLP 2011 GENIA Event Extraction task, our approach achieved 1.41% F 1 and 3.19% F 1 improvements on all events and complex events, respectively. Ablation studies confirm the importance of GEANet and hierarchical KG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Biomedical event extraction is a task that identifies a set of actions among proteins or genes that are associated with biological processes from natural language texts (Kim et al., 2009 (Kim et al., , 2011 . Development of biomedical event extraction tools enables many downstream applications, such as domain-specific text mining (Ananiadou et al., 2015; Spangher et al., 2020) , semantic search engines (Miyao et al., 2006) and automatic population and enrichment of database (Hirschman et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 186, |
|
"text": "(Kim et al., 2009", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 206, |
|
"text": "(Kim et al., , 2011", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 356, |
|
"text": "(Ananiadou et al., 2015;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 379, |
|
"text": "Spangher et al., 2020)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 426, |
|
"text": "(Miyao et al., 2006)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 503, |
|
"text": "(Hirschman et al., 2012)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A typical event extraction system 1) finds triggers that most clearly demonstrate the presence of events, 2) recognizes the protein participants (arguments), and 3) associates the arguments with the corresponding event triggers. For instance, the Nodes associated with the tokens in the example sentence are boldfaced. Bidirectional edges imply hierarchical relation between concept and semantic nodes. The word \"induces\" is a trigger of a Positive regulation event, whose trigger role and corresponding argument role cannot be easily determined with only textual input. The KG provides clues for identifying this trigger and its corresponding arguments given the red and blue double line reasoning paths connecting nodes BMP-6, Induce, Phosphorylation, and Positive regulation of biological process. We can infer that: 1) \"induces\" is an action of biological function, 2) a biological function can be quantified by positive regulation, and 3) positive regulation can result in phosphorylation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "sentence \"Protein A inhibits the expression of Protein B\" will be annotated with two nested events: Gene expression(Trigger: expression, Arg-Theme: Protein B) and Negative Regulation(Trigger: inhibits, Arg-Theme: Gene expression(Protein B), Arg-Cause: Protein A). Early attempts on biomedical event extraction adopted hand-crafted features (Bj\u00f6rne et al., 2009; Bj\u00f6rne and Salakoski, 2011; Riedel and McCallum, 2011; Venugopal et al., 2014a) . Recent advances have shown improvements using deep neural networks via distributional word representations in the biomedical domain (Moen and Ananiadou, 2013; Rao et al., 2017a; Bj\u00f6rne and Salakoski, 2018; ShafieiBavani et al., 2019) . further extends the word representations with embeddings of descriptive annotations from a knowledge base and demonstrates the importance of domain knowledge in biomedical event extraction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 340, |
|
"end": 361, |
|
"text": "(Bj\u00f6rne et al., 2009;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 389, |
|
"text": "Bj\u00f6rne and Salakoski, 2011;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 416, |
|
"text": "Riedel and McCallum, 2011;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 417, |
|
"end": 441, |
|
"text": "Venugopal et al., 2014a)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 576, |
|
"end": 602, |
|
"text": "(Moen and Ananiadou, 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 621, |
|
"text": "Rao et al., 2017a;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 622, |
|
"end": 649, |
|
"text": "Bj\u00f6rne and Salakoski, 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 650, |
|
"end": 677, |
|
"text": "ShafieiBavani et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, encoding knowledge with distributional embeddings does not provide adequate clues for identifying challenging events with nonindicative trigger words and nested structures. These embeddings do not contain structural or relational information about the biomedical entities. To overcome this challenge, we present a framework that incorporates knowledge from hierarchical knowledge graphs with graph neural networks (GNN) on top of a pre-trained language model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our first contribution is a novel representation of knowledge as hierarchical knowledge graphs containing both conceptual and semantic reasoning paths that enable better trigger and word identification based on Unified Medical Language System (UMLS), a biomedical knowledge base. Fig. 1 shows an example where the Positive Regulation event can be better identified with knowledge graphs and factual relational reasoning. Our second contribution is a new GNN, Graph Edgeconditioned Attention Networks (GEANet), that encodes complex domain knowledge. By integrating edge information into the attention mechanism, GEANet has greater capabilities in reasoning the plausibility of different event structure through factual relational paths in knowledge graphs (KGs).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 287, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Experiments show that our proposed method achieved state-of-the-art results on the BioNLP 2011 event extraction task (Kim et al., 2011 (Bodenreider, 2004) . We use the former two sources to build hierarchical KGs. The concept network from Metathesaurus contains the relationship between each biomedical concept pairs, while each concept contains one or more semantic types 1 Our code for pre-processing, modeling, and evaluation is available at https://github.com/PlusLabNLP/GEANet-BioMed-Event-Extraction. ",
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 134, |
|
"text": "(Kim et al., 2011", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 154, |
|
"text": "(Bodenreider, 2004)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "v 3 v 2 v 1 v 2 v 3 v 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Figure 2: Overview of knowledge incorporation. Contextualized embeddings for each token are generated by SciBERT. GEANet updates node embeddings for v 1 , v 2 , and v 3 via corresponding sentence graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "that can be found in the semantic network. The concept network provides direct definition lookup of the recognized biomedical terms, while the semantic network supports with additional knowledge in the semantic aspect. Example tuples can be found in Figure 1 . 2 There are 3.35M concepts, 10 concept relations, 182 semantic types, and 49 semantic relations in total.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 258, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our event extraction framework builds upon the pre-trained language model, SciBERT (Beltagy et al., 2019) , and supplement it with a novel graph neural network model, GEANet, that encodes domain knowledge from hierarchical KGs. We will first illustrate each component and discuss how training and inference are done.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 105, |
|
"text": "(Beltagy et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The two knowledge sources discussed in Section 2 are jointly modeled as a hierarchical graph for each sentence, which we refer to as a sentence graph. Each sentence graph construction consists of three steps: concept mapping, concept network construction, and semantic type augmentation. The first step is to map each sentence in the corpus to UMLS biomedical concepts with MetaMap, an entity mapping tool for UMLS concepts (Aronson, 2001 ). There are 7903 concepts (entities) being mapped from the corpus, denoted as K. The next step is concept network construction, where a minimum spanning tree (MST) that connects mapped concepts in the previous step is identified, forming concept reasoning paths. This step is NPcomplete. 3 We adopt a 2-approximate solution that constructs a global MST for the corpora GE'11 by running breadth-first search, assuming all edges are of unit distance. To prune out less relevant nodes and to improve computation efficiency, concept nodes that are not in K with less than T neighbors in K are removed. 4 The spanning tree for each sentence is then obtained by depth-first search on the global MST. Each matched token in the corpus is also included as a token node in the sentence graph, connecting with corresponding concept node. Finally, the semantic types for each concept node are modeled as nodes that are linked with associated concept nodes in the sentence graph. Two semantic type nodes will also be linked if they have known relationships in the semantic network.", |
|
"cite_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 438, |
|
"text": "(Aronson, 2001", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 728, |
|
"end": 729, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1038, |
|
"end": 1039, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Knowledge Graph Modeling", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The majority of existing graph neural networks (GNN) consider only hidden states of nodes and adjacency matrix without modeling edge information. To properly model the hierarchy of the graph, it is essential for the message passing function of a GNN to consider edge features. We propose Graph Edge Conditioned Attention Networks (GEANet) to integrate edge features into the attention mechanism for message propagation. The node embeddings update of GEANet at the l-th layer can be expressed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x (l) i = MLP \u03b8 x (l\u22121) i + j\u2208N (i) ai,j \u2022 x (l\u22121) j (1) ai,j = exp (MLP \u03c8 (ei,j)) k\u2208N (i) exp (MLP \u03c8 (e i,k ))", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where x", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "i denotes the node embeddings at layer l, e i,j denotes the embedding for edge (i, j), and MLP \u03c8 and MLP \u03b8 are two multi-layer perceptrons.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "GEANet is inspired by Edge Conditioned Convolution (ECC), where convolution operation depends on edge type (Simonovsky and Komodakis, 2017),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "x (l) i = MLP \u03b8 x (l\u22121) i + j\u2208N (i) x (l\u22121) j \u2022 MLP \u03c8 (ei,j) (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Compared to ECC, GEANet is able to determine the relative importance of neighboring nodes with attention mechanism.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Knowledge Incorporation. We build GEANet on top of SciBERT (Peters et al., 2019) to incorporate domain knowledge into rich contextualized representations. Specifically, we take the contextual embeddings {h 1 , ..., h n } produced by SciB-ERT as inputs and produces knowledge-aware embeddings {\u0125 1 , ...,\u0125 n } as outputs. To initialize the embeddings for a sentence graph, for a mapped token, we project its SciBERT contextual embedding to initialize its corresponding node embedding h i,KG = h i W KG + b KG . Other nodes and edges are initialized by pretrained KG embeddings (details in Section 4.1). To accommodate multiple relations between two entities in UMLS, edge embeddings e i,j are initialized by summing the embeddings of each relation between the nodes i and j. Then we apply layers of GEANet to encode the graph h l i,KG = GEANet(h i,KG ). The knowledgeaware representation is obtained by aggregating SciBERT representations and KG representations,", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 80, |
|
"text": "(Peters et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h i = h l i,KG W LM + b LM + h i . 5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The process is illustrated in Figure 2 GEANet layer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 38, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "GEANet", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The entire framework is trained with a multitask learning pipeline consisting of trigger classification and argument classification, following (Han et al., 2019a,b) . Trigger classification predicts the trigger type for each token. The predicted score of each token is computed as\u0177 tri i = MLP tri (\u0125 i ). In the argument classification stage, each possible pair of gold trigger and gold entity is gathered and labeled with corresponding argument role. 6 The argument scores between the i-th token and j-th token are computed as\u0177 arg i,j = MLP arg (\u0125 i ;\u0125 j ), where (; ) denotes concatenation. Cross Entropy", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 164, |
|
"text": "(Han et al., 2019a,b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Extraction", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "loss L t = \u2212 1 N t N t i=1 y t i \u2022 log\u0177 t i ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Extraction", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "is used for both tasks, where t denotes task, N t denotes the number of training instances of task t, y t i denotes the ground truth label, and\u0177 t i denotes the predicted label. The multitask learning minimizes the sum of the two losses L = L tri + L arg in the training stage. During inference, unmerging is conducted to combine identified triggers and arguments for multiple arguments events (Bj\u00f6rne and Salakoski, 2011) . We adopted similar unmerging heuristics. For Regulation events, we use the same heuristics as Bj\u00f6rne et al. (2009) . For Binding events, we subsume all Theme arguments associated with a trigger 5\u0125 i = hi for each token i without mapped concept. 6 During inference, predicted triggers are used instead. into one event such that every trigger corresponds to only one single Binding event.", |
|
"cite_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 422, |
|
"text": "(Bj\u00f6rne and Salakoski, 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 539, |
|
"text": "Bj\u00f6rne et al. (2009)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 670, |
|
"end": 671, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Extraction", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Our models are evaluated on BioNLP 11 GENIA event extraction task (GE'11 ). All models were trained on the training set, validated on the dev set, and tested on the test set. A separate evaluation on Regulation events is conducted to validate the effectiveness of our framework on nested events with non-indicative trigger word. Reported results are obtained from the official evaluator under approximate span and recursive criteria. In the preprocessing step, the GE'11 corpora were parsed with TEES preprocessing pipeline (Bj\u00f6rne and Salakoski, 2018) . Tokenization is done by the SciBERT tokenizer. Biomedical concepts in each sentence are then recognized with MetaMap and aligned with their corresponding tokens. The best performing model was found by grid search conducted on the dev set. The edge and node representation in KGs were intialized with 300 dimensional pre-trained embeddings using TransE (Wang et al., 2014) . The entire framework is optimized with BERTAdam optimizer for a maximum of 100 epochs with batch size of 4. Training is stopped if the dev set F 1 does not improve for 5 consecutive epochs (more details see Appendix).", |
|
"cite_spans": [ |
|
{ |
|
"start": 524, |
|
"end": 552, |
|
"text": "(Bj\u00f6rne and Salakoski, 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 907, |
|
"end": 926, |
|
"text": "(Wang et al., 2014)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Comparison with existing methods We compare our method with the following prior works: TEES and Stacked Gen. use SVM-based models with token and sentence-level features (Bj\u00f6rne and Salakoski, 2011; Majumder et al., 2016) ; TEES CNN leverages Convolutional Neural Networks and dependency parsing graph (Bj\u00f6rne and Salakoski, 2018) ; KB-driven T-LSTM adopts an external knowledge base with type and sentence embeddings, into a Tree-LSTM model . SciBERT-FT is a fine-tuned SciB-ERT without external resources, the knowledgeagnostic counterpart of GEANet-SciBERT. According to Table 1 , SciBERT-FT achieves similar performance to KB-driven T-LSTM, implying that SciBERT may have stored domain knowledge implicitly during pre-training. Similar hypothesis has also been studied in commonsense reasoning (Wang et al., 2019) . GEANet-SciBERT achieves an absolute improvement of 1.41% in F 1 on the test data compared to the previous state-of-theart method. In terms of Regulation events, Table 2 shows that GEANet-SciBERT outperforms the previous system and fine-tuned SciBERT by 3.19% and 1.39% in F 1. Ablation study To better understand the importance of different model components, ablation study is conducted and summarized in Table 3 . GEANet achieves the highest F 1 when compared to two other GNN variants, ECC and GAT (Veli\u010dkovi\u0107 et al., 2018) , demonstrating its stronger knowledge incorporation capacity. Hierarchical knowledge graph representation is also shown to be critical. Removing semantic type (STY) nodes from hierarchical KGs leads to performance drop. Impact of amount of training data Model performance on different amount of randomly sampled training data is shown in Fig. 3 . GEANet-SciBERT shows consistent improvement over finetuned SciBERT across different fractions. The performance gain is slightly larger with less training data. \nThis illustrates the robustness of GEANet in integrating domain knowledge and its particular advantage under low-resource setting.",
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 197, |
|
"text": "(Bj\u00f6rne and Salakoski, 2011;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 220, |
|
"text": "Majumder et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 329, |
|
"text": "(Bj\u00f6rne and Salakoski, 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 797, |
|
"end": 816, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 1320, |
|
"end": 1345, |
|
"text": "(Veli\u010dkovi\u0107 et al., 2018)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 573, |
|
"end": 580, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 980, |
|
"end": 988, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 1225, |
|
"end": 1232, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 1685, |
|
"end": 1691, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Error Analysis By comparing the predictions from GEANet-SciBERT and gold events in the dev set, two major failed cases are identified:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Adjective Trigger: Most events are associated with a verb or noun trigger. Adjective triggers are scarce in the training set (\u223c7%), which poses a challenge to identify this type of trigger. Although knowledge-aware methods should be able to resolve these errors theoretically, these adjective triggers often cannot be linked with UMLS concepts. Without proper grounding, it is hard for our model to recognize these triggers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Misleading Trigger: Triggers providing \"clues\" about incorrect events can be misleading. For instance, Furthermore, expression of an activated PKD1 mutant enhances HPK1-mediated NFkappaB activation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Our model predicts expression as a trigger of type Gene expression, while the gold label is Positive regulation. Despite that fact that our model is capable of handling such scenarios sometimes given grounded biomedical concepts and factual reasoning paths, there is still room for improvement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Event Extraction Most existing event extraction systems focus on extracting events in news. Early attempts relied on hand-crafted features and a pipeline architecture (Gupta and Ji, 2009; Li et al., 2013) . Later studies gained significant improvement from neural architectures, such as convolutional neural networks (Chen et al., 2015; Nguyen and Grishman, 2015) , and recurrent neural networks (Nguyen et al., 2016) . More recent studies leverages large pre-trained language models to obtain richer contextual information (Wadden et al., 2019; Lin et al., 2020) . Another line of works utilized GNN to enhance event extraction performance. applied attention-based graph convolution networks on dependency parsing trees. We instead propose a GNN, GEANet, for integrating domain knowledge into contextualized embeddings from pre-trained language models. Biomedial Event Extraction Event extraction for biomedicine is more challenging due to higher demand for domain knowledge. BioNLP 11 GE-NIA event extraction task (GE'11 ) is the major benchmark for measuring the quality of biomedical event extraction system (Kim et al., 2011) . Similar to event extraction in news domain, initial studies tackle biomedical event extraction with humanengineered features and pipeline approaches (Miwa et al., 2012; Bj\u00f6rne and Salakoski, 2011) . Great portion of recent works observed significant gains from neural models (Venugopal et al., 2014b; Rao et al., 2017b; Jagannatha and Yu, 2016; Bj\u00f6rne and Salakoski, 2018) . incorporated information from Gene Ontology, a biomedical knowledge base, into tree-LSTM models with distributional representations. Instead, our strategy is to model two knowledge graphs from UMLS hierarchically with conceptual and semantic reasoning paths, providing stronger clues for identifying challenging events in biomedical corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 187, |
|
"text": "(Gupta and Ji, 2009;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 204, |
|
"text": "Li et al., 2013)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 336, |
|
"text": "(Chen et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 363, |
|
"text": "Nguyen and Grishman, 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 417, |
|
"text": "(Nguyen et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 545, |
|
"text": "(Wadden et al., 2019;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 546, |
|
"end": 563, |
|
"text": "Lin et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1112, |
|
"end": 1130, |
|
"text": "(Kim et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1282, |
|
"end": 1301, |
|
"text": "(Miwa et al., 2012;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1302, |
|
"end": 1329, |
|
"text": "Bj\u00f6rne and Salakoski, 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1408, |
|
"end": 1433, |
|
"text": "(Venugopal et al., 2014b;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 1434, |
|
"end": 1452, |
|
"text": "Rao et al., 2017b;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1453, |
|
"end": 1477, |
|
"text": "Jagannatha and Yu, 2016;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1478, |
|
"end": 1505, |
|
"text": "Bj\u00f6rne and Salakoski, 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We have proposed a framework to incorporate domain knowledge for biomedical event extraction. Evaluation results on GE'11 demonstrated the efficacy of GEANet and hierarchical KG representation in improving extraction of non-indicative trigger words associated nested events. We also show that our method is robust when applied to different amount of training data, while being advantageous in low-resource scenarios. Future works include grounding adjective triggers to knowledge bases, better biomedical knowledge representation and extracting biomedical events at document level.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "There are several bi-directional relations between some concepts. We only show one direction for simplicity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finding a MST on a subset of nodes (K) is known as a Steiner tree problem.4 T is empirically set to be 35.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Rujun Han for helpful advice during the development of our model. We also appreciate insightful feedback from PLUSLab members, and the anonymous reviewers. This research was sponsored by an NIH R01 (LM012592) and the Intelligence Advanced Research Projects Activity (IARPA), via Contract No. 2019-19051600007. The views and conclusions of this paper are those of the authors and do not reflect the official policy or position of NIH, IARPA, or the US government.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our models are implemented in PyTorch (Paszke et al., 2019) . Hyper-parameters are found by grid search within search range listed in Table 4 . The hyper-parameters of the best performing model are summarized in 5. All experiments are conducted on a 12-CPU machine running CentOS Linux 7 (Core) and NVIDIA RTX 2080 with CUDA 10.1.To pre-train KGE, we leverage the TransE implementation from OpenKE (Han et al., 2018) . All tuples associated with the selected nodes described in Section 3.1 are used for pre-training with margin loss and negative sampling,where \u03b3 denotes margin, and d(x, x ) denotes the \u2212 1 distance between x and x . h and t are embeddings of head and tail entities from the gold training sets S with relation . (h , ,t ) denotes a corrupted tuplet with either the head or tail entity replaced by a random entity. TransE is optimized using Adam (Kingma and Ba, 2015) with hyperparameters illustrated in Table 6 . Every 50 epochs, the model checkpoint is saved if the mean reciprocal rank on the development set improve from the last checkpoint; otherwise, training will be stopped.", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 59, |
|
"text": "(Paszke et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 416, |
|
"text": "(Han et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 141, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 921, |
|
"end": 928, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Implementation Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The statistics of GE'11 is shown in 7. The corpus contains 14496 events with 37.2% containing nested structure (Bj\u00f6rne and Salakoski, 2011) . 7 We use the official dataset split for all the results reported.Hyper-parameter Range Relation MLP dim.{300, 500, 700, 1000} Trigger MLP dim.{300, 500, 700, 1000} Learning rate { 1 \u00d7 10 \u22125 , 3 \u00d7 10 \u22125 , 5 \u00d7 10 \u22125 } Table 4 : Hyper-parameter search range for fine-tuning SciBERT.7 The dataset can be downloaded from http://bionlpst.dbcls.jp/GE/2011/downloads/. ",
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 139, |
|
"text": "(Bj\u00f6rne and Salakoski, 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 143, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 365, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Dataset", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Event-based text mining for biology and functional genomics", |
|
"authors": [ |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raheel", |
|
"middle": [], |
|
"last": "Nawaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Mcnaught", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas B", |
|
"middle": [], |
|
"last": "Kell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Briefings in functional genomics", |
|
"volume": "14", |
|
"issue": "3", |
|
"pages": "213--230", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sophia Ananiadou, Paul Thompson, Raheel Nawaz, John McNaught, and Douglas B Kell. 2015. Event-based text mining for biology and func- tional genomics. Briefings in functional genomics, 14(3):213-230.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Effective mapping of biomedical text to the umls metathesaurus: the metamap program", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Alan R Aronson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the AMIA Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan R Aronson. 2001. Effective mapping of biomed- ical text to the umls metathesaurus: the metamap program. In Proceedings of the AMIA Symposium, page 17. American Medical Informatics Associa- tion.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "SciB-ERT: A pretrained language model for scientific text", |
|
"authors": [ |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3615--3620", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1371" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciB- ERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3615- 3620, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Extracting complex biological events with rich graphbased feature sets", |
|
"authors": [ |
|
{ |
|
"first": "Jari", |
|
"middle": [], |
|
"last": "Bj\u00f6rne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juho", |
|
"middle": [], |
|
"last": "Heimonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Ginter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antti", |
|
"middle": [], |
|
"last": "Airola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tapio", |
|
"middle": [], |
|
"last": "Pahikkala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tapio", |
|
"middle": [], |
|
"last": "Salakoski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the BioNLP 2009 Workshop Companion Volume for Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jari Bj\u00f6rne, Juho Heimonen, Filip Ginter, Antti Airola, Tapio Pahikkala, and Tapio Salakoski. 2009. Ex- tracting complex biological events with rich graph- based feature sets. In Proceedings of the BioNLP 2009 Workshop Companion Volume for Shared Task, pages 10-18, Boulder, Colorado. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Generalizing biomedical event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Jari", |
|
"middle": [], |
|
"last": "Bj\u00f6rne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tapio", |
|
"middle": [], |
|
"last": "Salakoski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of BioNLP Shared Task 2011 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "183--191", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jari Bj\u00f6rne and Tapio Salakoski. 2011. Generaliz- ing biomedical event extraction. In Proceedings of BioNLP Shared Task 2011 Workshop, pages 183- 191, Portland, Oregon, USA. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Biomedical event extraction using convolutional neural networks and dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Jari", |
|
"middle": [], |
|
"last": "Bj\u00f6rne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tapio", |
|
"middle": [], |
|
"last": "Salakoski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the BioNLP 2018 workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "98--108", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-2311" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jari Bj\u00f6rne and Tapio Salakoski. 2018. Biomedi- cal event extraction using convolutional neural net- works and dependency parsing. In Proceedings of the BioNLP 2018 workshop, pages 98-108, Mel- bourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The unified medical language system (umls): integrating biomedical terminology", |
|
"authors": [ |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Bodenreider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Nucleic acids research", |
|
"volume": "32", |
|
"issue": "1", |
|
"pages": "267--270", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olivier Bodenreider. 2004. The unified medical lan- guage system (umls): integrating biomedical termi- nology. Nucleic acids research, 32(suppl 1):D267- D270.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Event extraction via dynamic multipooling convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Yubo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daojian", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "167--176", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-1017" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yubo Chen, Liheng Xu, Kang Liu, Daojian Zeng, and Jun Zhao. 2015. Event extraction via dynamic multi- pooling convolutional neural networks. In Proceed- ings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th Interna- tional Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers), pages 167-176, Beijing, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Predicting unknown time arguments based on cross-event propagation", |
|
"authors": [ |
|
{ |
|
"first": "Prashant", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the ACL-IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prashant Gupta and Heng Ji. 2009. Predicting un- known time arguments based on cross-event prop- agation. In Proceedings of the ACL-IJCNLP 2009", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Conference Short Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "369--372", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference Short Papers, pages 369-372, Suntec, Singapore. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Deep structured neural network for event temporal relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Rujun", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Hsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aram", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Galstyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The 2019 SIGNLL Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rujun Han, I Hsu, Mu Yang, Aram Galstyan, Ralph Weischedel, and Nanyun Peng. 2019a. Deep struc- tured neural network for event temporal relation ex- traction. In The 2019 SIGNLL Conference on Com- putational Natural Language Learning (CoNLL).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Joint event and temporal relation extraction with shared representations and structured prediction", |
|
"authors": [ |
|
{ |
|
"first": "Rujun", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Ning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "434--444", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1041" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rujun Han, Qiang Ning, and Nanyun Peng. 2019b. Joint event and temporal relation extraction with shared representations and structured prediction. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 434- 444, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Openke: An open toolkit for knowledge embedding", |
|
"authors": [ |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shulin", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lv", |
|
"middle": [], |
|
"last": "Xin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yankai", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juanzi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xu Han, Shulin Cao, Lv Xin, Yankai Lin, Zhiyuan Liu, Maosong Sun, and Juanzi Li. 2018. Openke: An open toolkit for knowledge embedding. In Proceed- ings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Text mining for the biocuration workflow", |
|
"authors": [ |
|
{ |
|
"first": "Lynette", |
|
"middle": [], |
|
"last": "Hirschman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Gully", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Burns", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cecilia", |
|
"middle": [], |
|
"last": "Krallinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Arighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alfonso", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cathy", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Valencia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Chatr-Aryamontri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Dowell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Huala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Database", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lynette Hirschman, Gully AP Burns, Martin Krallinger, Cecilia Arighi, K Bretonnel Cohen, Alfonso Valencia, Cathy H Wu, Andrew Chatr- Aryamontri, Karen G Dowell, Eva Huala, et al. 2012. Text mining for the biocuration workflow. Database, 2012.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Bidirectional RNN for medical event detection in electronic health records", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Abhyuday", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Jagannatha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "473--482", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1056" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhyuday N Jagannatha and Hong Yu. 2016. Bidi- rectional RNN for medical event detection in elec- tronic health records. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 473-482, San Diego, California. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Overview of BioNLP'09 shared task on event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshinobu", |
|
"middle": [], |
|
"last": "Kano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the BioNLP 2009 Workshop Companion Volume for Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin-Dong Kim, Tomoko Ohta, Sampo Pyysalo, Yoshi- nobu Kano, and Jun'ichi Tsujii. 2009. Overview of BioNLP'09 shared task on event extraction. In Pro- ceedings of the BioNLP 2009 Workshop Companion Volume for Shared Task, pages 1-9, Boulder, Col- orado. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Overview of bionlp shared task", |
|
"authors": [ |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Bossy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngan", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the BioNLP shared task 2011 workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin-Dong Kim, Sampo Pyysalo, Tomoko Ohta, Robert Bossy, Ngan Nguyen, and Jun'ichi Tsujii. 2011. Overview of bionlp shared task 2011. In Proceed- ings of the BioNLP shared task 2011 workshop, pages 1-6. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederick P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Biomedical event extraction based on knowledgedriven tree-lstm", |
|
"authors": [ |
|
{ |
|
"first": "Diya", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lifu", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1421--1430", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diya Li, Lifu Huang, Heng Ji, and Jiawei Han. 2019. Biomedical event extraction based on knowledge- driven tree-lstm. In Proceedings of the 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 1421-1430.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Joint event extraction via structured prediction with global features", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "73--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global fea- tures. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 73-82, Sofia, Bulgaria. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A joint neural model for information extraction with global features", |
|
"authors": [ |
|
{ |
|
"first": "Ying", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingfei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7999--8009", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.713" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ying Lin, Heng Ji, Fei Huang, and Lingfei Wu. 2020. A joint neural model for information extraction with global features. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics, pages 7999-8009, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Jointly multiple events extraction via attentionbased graph information aggregation", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhunchen", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1247--1256", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1156" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Liu, Zhunchen Luo, and Heyan Huang. 2018. Jointly multiple events extraction via attention- based graph information aggregation. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1247-1256, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Biomolecular event extraction using a stacked generalization based classifier", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asif", |
|
"middle": [], |
|
"last": "Ekbal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sudip Kumar", |
|
"middle": [], |
|
"last": "Naskar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 13th International Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amit Majumder, Asif Ekbal, and Sudip Kumar Naskar. 2016. Biomolecular event extraction using a stacked generalization based classifier. In Proceedings of the 13th International Conference on Natural Lan- guage Processing, pages 55-64, Varanasi, India. NLP Association of India.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Boosting automatic event extraction from the literature using domain adaptation and coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Miwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Bioinformatics", |
|
"volume": "28", |
|
"issue": "13", |
|
"pages": "1759--1765", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Makoto Miwa, Paul Thompson, and Sophia Ananiadou. 2012. Boosting automatic event extraction from the literature using domain adaptation and coreference resolution. Bioinformatics, 28(13):1759-1765.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Semantic retrieval for the accurate identification of relational concepts in massive textbases", |
|
"authors": [ |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Miyao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katsuya", |
|
"middle": [], |
|
"last": "Masuda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshimasa", |
|
"middle": [], |
|
"last": "Tsuruoka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuhiro", |
|
"middle": [], |
|
"last": "Yoshida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takashi", |
|
"middle": [], |
|
"last": "Ninomiya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1017--1024", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1220175.1220303" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusuke Miyao, Tomoko Ohta, Katsuya Masuda, Yoshi- masa Tsuruoka, Kazuhiro Yoshida, Takashi Ni- nomiya, and Jun'ichi Tsujii. 2006. Semantic re- trieval for the accurate identification of relational concepts in massive textbases. In Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1017- 1024, Sydney, Australia. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Distributional semantics resources for biomedical text processing", |
|
"authors": [], |
|
"year": 2013, |
|
"venue": "Proceedings of LBM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "39--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "SPFGH Moen and Tapio Salakoski2 Sophia Anani- adou. 2013. Distributional semantics resources for biomedical text processing. Proceedings of LBM, pages 39-44.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Joint event extraction via recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Thien Huu Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "300--309", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1034" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen, Kyunghyun Cho, and Ralph Gr- ishman. 2016. Joint event extraction via recurrent neural networks. In Proceedings of the 2016 Con- ference of the North American Chapter of the As- sociation for Computational Linguistics: Human Language Technologies, pages 300-309, San Diego, California. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Event detection and domain adaptation with convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Huu", |
|
"middle": [], |
|
"last": "Thien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "365--371", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-2060" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen and Ralph Grishman. 2015. Event detection and domain adaptation with convolutional neural networks. In Proceedings of the 53rd Annual Meeting of the Association for Computational Lin- guistics and the 7th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 365-371, Beijing, China. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Pytorch: An imperative style, high-performance deep learning library", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Massa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Chanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Killeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Gimelshein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Antiga", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8024--8035", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 2019. Pytorch: An imperative style, high-performance deep learning library. In Ad- vances in Neural Information Processing Systems, pages 8024-8035.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Knowledge enhanced contextual word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Logan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vidur", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--54", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E. Peters, Mark Neumann, Robert Logan, Roy Schwartz, Vidur Joshi, Sameer Singh, and Noah A. Smith. 2019. Knowledge enhanced contextual word representations. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 43-54, Hong Kong, China. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Biomedical event extraction using abstract meaning representation", |
|
"authors": [ |
|
{ |
|
"first": "Sudha", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "126--135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sudha Rao, Daniel Marcu, Kevin Knight, and Hal Daum\u00e9 III. 2017a. Biomedical event extraction us- ing abstract meaning representation. In BioNLP 2017, pages 126-135.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Biomedical event extraction using Abstract Meaning Representation", |
|
"authors": [ |
|
{ |
|
"first": "Sudha", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "126--135", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-2315" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sudha Rao, Daniel Marcu, Kevin Knight, and Hal Daum\u00e9 III. 2017b. Biomedical event extraction us- ing Abstract Meaning Representation. In BioNLP 2017, pages 126-135, Vancouver, Canada. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Fast and robust joint models for biomedical event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "McCallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Riedel and Andrew McCallum. 2011. Fast and robust joint models for biomedical event extrac- tion. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Global locality in event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Elaheh", |
|
"middle": [], |
|
"last": "ShafieiBavani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [ |
|
"Jimeno" |
|
], |
|
"last": "Yepes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.04822" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elaheh ShafieiBavani, Antonio Jimeno Yepes, and Xu Zhong. 2019. Global locality in event extraction. arXiv preprint arXiv:1909.04822.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Dynamic edge-conditioned filters in convolutional neural networks on graphs", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Simonovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikos", |
|
"middle": [], |
|
"last": "Komodakis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3693--3702", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Simonovsky and Nikos Komodakis. 2017. Dy- namic edge-conditioned filters in convolutional neu- ral networks on graphs. In Proceedings of the IEEE conference on computer vision and pattern recogni- tion, pages 3693-3702.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Enabling low-resource transfer learning across covid-19 corpora by combining event-extraction and co-training", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Spangher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emilio", |
|
"middle": [], |
|
"last": "Ferrara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACL 2020 Workshop on Natural Language Processing for COVID-19 (NLP-COVID)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Spangher, Nanyun Peng, Jonathan May, and Emilio Ferrara. 2020. Enabling low-resource trans- fer learning across covid-19 corpora by combining event-extraction and co-training. In ACL 2020 Work- shop on Natural Language Processing for COVID- 19 (NLP-COVID).", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Graph attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Petar", |
|
"middle": [], |
|
"last": "Veli\u010dkovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillem", |
|
"middle": [], |
|
"last": "Cucurull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arantxa", |
|
"middle": [], |
|
"last": "Casanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriana", |
|
"middle": [], |
|
"last": "Romero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Li\u00f2", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Li\u00f2, and Yoshua Bengio. 2018. Graph attention networks. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Relieving the computational bottleneck: Joint inference for event extraction with high-dimensional features", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Venugopal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vibhav", |
|
"middle": [], |
|
"last": "Gogate", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "831--843", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak Venugopal, Chen Chen, Vibhav Gogate, and Vincent Ng. 2014a. Relieving the computational bottleneck: Joint inference for event extraction with high-dimensional features. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 831-843.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Relieving the computational bottleneck: Joint inference for event extraction with high-dimensional features", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Venugopal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vibhav", |
|
"middle": [], |
|
"last": "Gogate", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "831--843", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1090" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak Venugopal, Chen Chen, Vibhav Gogate, and Vincent Ng. 2014b. Relieving the computational bottleneck: Joint inference for event extraction with high-dimensional features. In Proceedings of the 2014 Conference on Empirical Methods in Natu- ral Language Processing (EMNLP), pages 831-843, Doha, Qatar. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Entity, relation, and event extraction with contextualized span representations", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Wadden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulme", |
|
"middle": [], |
|
"last": "Wennberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5784--5789", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1585" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Wadden, Ulme Wennberg, Yi Luan, and Han- naneh Hajishirzi. 2019. Entity, relation, and event extraction with contextualized span representations. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 5784- 5789, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Does it make sense? and why? a pilot study for sense making and explanation", |
|
"authors": [ |
|
{ |
|
"first": "Cunxiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuailong", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaonan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tian", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4020--4026", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1393" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cunxiang Wang, Shuailong Liang, Yue Zhang, Xiao- nan Li, and Tian Gao. 2019. Does it make sense? and why? a pilot study for sense making and ex- planation. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 4020-4026, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Knowledge graph embedding by translating on hyperplanes", |
|
"authors": [ |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianwen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianlin", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Twenty-Eighth AAAI conference on artificial intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge graph embedding by trans- lating on hyperplanes. In Twenty-Eighth AAAI con- ference on artificial intelligence.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "An example of a UMLS-based hierarchical KG assisting event extraction. Circles represent concept nodes and triangles represent semantic nodes.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Performance comparison on the test set w.r.t. different amount of training data.", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Theme</td><td/><td>Theme</td><td/><td/><td/></tr><tr><td>Protein</td><td>Gene expression</td><td>None</td><td>Positive regulation</td><td/><td>None</td></tr><tr><td/><td/><td/><td/><td/><td>MLP</td></tr><tr><td>\u0125 i</td><td/><td/><td/><td/><td/></tr><tr><td>h i,</td><td/><td/><td/><td/><td/></tr><tr><td>h i</td><td/><td/><td/><td/><td/></tr><tr><td/><td/><td>SciBERT</td><td/><td/><td/></tr><tr><td>Sialoadhesin</td><td>expression</td><td>was</td><td>functional</td><td>\u2026\u2026</td><td>RBCs</td></tr><tr><td/><td/><td/><td colspan=\"2\">GEANet</td><td/></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "Model comparison on GE'11 test set.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td>Recall Prec. F1</td></tr><tr><td>KB-driven T-LSTM</td><td>41.73 55.73 47.72</td></tr><tr><td>SciBERT-FT</td><td>45.39 54.48 49.52</td></tr><tr><td>GEANet-SciBERT</td><td>47.23 55.21 50.91</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"text": "Performance comparison on the Regulation events of the test set (including Regulation, Positive Regulation, and Negative Regulation sub-events).", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"text": "Ablation study over different components.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |