|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:07:44.140021Z" |
|
}, |
|
"title": "Word centrality constrained representation for keyphrase extraction", |
|
"authors": [ |
|
{ |
|
"first": "Zelalem", |
|
"middle": [], |
|
"last": "Gero", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Emory University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Joyce", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Ho", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Emory University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "To keep pace with the increased generation and digitization of documents, automated methods that can improve search, discovery and mining of the vast body of literature are essential. Keyphrases provide a concise representation by identifying salient concepts in a document. Various supervised approaches model keyphrase extraction using local context to predict the label for each token and perform much better than the unsupervised counterparts. Unfortunately, this method fails for short documents where the context is unclear. Moreover, keyphrases, which are usually the gist of a document, need to be the central theme. We propose a new extraction model that introduces a centrality constraint to enrich the word representation of a Bidirectional long short-term memory. Performance evaluation on two publicly available datasets demonstrate our model outperforms existing state-of-the art approaches. Our model is publicly available at https://github.com/ZHgero/ keyphrases_centrality.git", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "To keep pace with the increased generation and digitization of documents, automated methods that can improve search, discovery and mining of the vast body of literature are essential. Keyphrases provide a concise representation by identifying salient concepts in a document. Various supervised approaches model keyphrase extraction using local context to predict the label for each token and perform much better than the unsupervised counterparts. Unfortunately, this method fails for short documents where the context is unclear. Moreover, keyphrases, which are usually the gist of a document, need to be the central theme. We propose a new extraction model that introduces a centrality constraint to enrich the word representation of a Bidirectional long short-term memory. Performance evaluation on two publicly available datasets demonstrate our model outperforms existing state-of-the art approaches. Our model is publicly available at https://github.com/ZHgero/ keyphrases_centrality.git", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Keyphrase extraction is an important information extraction task that identifies single or multi-word linguistic units that concisely represent a document. They can also serve to provide a brief summary of the document content. Keyphrases are widely used in variety of natural language processing tasks such as document summarization (Bharti and Babu, 2017; Sarkar, 2014) , query formulation (Jones and Staveley, 1999 ), text classification (Coenen et al., 2007) , clustering (Hammouda et al., 2005) , and recommendation systems (Naw and Hlaing, 2013) . Keyphrases have become increasingly important for biomedical documents as there has been an exponential growth with over 32 million articles indexed by PubMed (NLM) . Existing keyphrase extraction methods mainly fall either under a supervised or unsupervised approach. Common unsupervised approaches use word co-occurrence statistics to build graph-based ranking algorithms. Each word is mapped to a node and edges connect words that co-occur within a specified window size. Even though unsupervised approaches are desirable for datasets which do not have manually-labeled ground truth values, most such methods perform worse compared to the supervised counterparts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 334, |
|
"end": 357, |
|
"text": "(Bharti and Babu, 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 371, |
|
"text": "Sarkar, 2014)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 417, |
|
"text": "(Jones and Staveley, 1999", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 462, |
|
"text": "(Coenen et al., 2007)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 499, |
|
"text": "(Hammouda et al., 2005)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 529, |
|
"end": 551, |
|
"text": "(Naw and Hlaing, 2013)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 718, |
|
"text": "(NLM)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The supervised approaches use classification to label every token as being part of a keyphrase or not by using features such as part-of speech tags, termfrequency inverse document frequency (tf-idf), and the position of the token in the document. Recently, supervised methods based on deep learning have been employed for keyphrase extraction. In Thomaidou and Vazirgiannis (2011) and Gollapalli et al. (2017) , the authors posed the problem as a sequence labeling task and applied a Long Short-Term Memory network (LSTM) and conditional random fields (CRF) to tag each token in document as positive (i.e., part of a keyphrase) or negative. While these approaches achieve much better performance, they still suffer from a major limitation when applied on biomedical literature. The task of labelling each token does not consider how central the token is to the document contents. For Figure 1 , the main theme of the keyphrases are genes associated with breast cancer. Thus, the document theme can be used as additional information to improve the keyphrase extraction performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 347, |
|
"end": 380, |
|
"text": "Thomaidou and Vazirgiannis (2011)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 409, |
|
"text": "Gollapalli et al. (2017)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 884, |
|
"end": 892, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To this end, we propose to address the problem of keyphrase extraction as a sequence labelling task with an additional component to capture the centrality of each token. We design a centrality layer built on top of a bidirectional LSTM (BiLSTM) layer to constrain each token with regards to the central theme of the document. The output dependencies are then modeled using a CRF layer. The contributions of our work are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Introducing a centrality constraint layer to better capture the main theme of the document and how strongly each token is related to the main theme.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Thorough evaluation of the centrality layer using an ablation study on biomedical and general domain abstracts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The next section presents a brief description of the related work. The proposed keyphrase extraction method is introduced in Section 3. Sections 4, and 5 present experimental results and conclusion respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Keyphrase extraction methods mainly take either supervised or unsupervised approach. Unsupervised approaches generate candidates and rank using features such as tf-idf and topic proportions (Barker and Cornacchia, 2000; Liu et al., 2009b) , graph-based centrality measures (Grineva et al., 2009; Wan and Xiao, 2008) , topic modeling (Liu et al., 2009a; Teneva and Cheng, 2017) , and document's citation network (Gollapalli and Caragea, 2014). Unsupervised, graph-based methods build a graph from the input document where all the candidate keyphrases are nodes and the connection between each candidate is represented by edges. A graph-based ranking method then determines the weights for each node based on the relatedness between the candidates. Alternatively, topic-based approaches cluster candidate keyphrases into topics in the document so that all the topics in the input document are represented by the selected keyphrases. Recently (Sun et al., 2020) proposed a sentence embedding model named SIFRank that uses autoregressive pre-trained language model to extract keyphrases from short documents. Yet unsupervised methods often fail to achieve state-of-the-art performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 219, |
|
"text": "(Barker and Cornacchia, 2000;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 220, |
|
"end": 238, |
|
"text": "Liu et al., 2009b)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 295, |
|
"text": "(Grineva et al., 2009;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 315, |
|
"text": "Wan and Xiao, 2008)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 352, |
|
"text": "(Liu et al., 2009a;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 376, |
|
"text": "Teneva and Cheng, 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 940, |
|
"end": 958, |
|
"text": "(Sun et al., 2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Under the supervised approach, the keyphrase extraction problem is treated as a binary classification task (Alzaidy et al., 2019; Turney, 2000 Turney, , 2002 , where learning algorithms such as support vector machines (Witten et al., 2005; Jiang et al., 2009) and maximum entropy (Kim and Kan, 2009; Yih et al., 2006) are used. Supervised keyphrase extraction can also be posed as a ranking problem between candidates (Witten et al., 2005) . The candidates keys are extracted using statistical features (tf-idf, number of occurrences, first occurrence of the key) and structural features (part of speech tags).", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 129, |
|
"text": "(Alzaidy et al., 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 142, |
|
"text": "Turney, 2000", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 157, |
|
"text": "Turney, , 2002", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 239, |
|
"text": "(Witten et al., 2005;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 259, |
|
"text": "Jiang et al., 2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 299, |
|
"text": "(Kim and Kan, 2009;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 317, |
|
"text": "Yih et al., 2006)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 439, |
|
"text": "(Witten et al., 2005)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Deep learning based models have also been used for keyphrase extraction. Word embeddings are used to measure the relatedness between words in graph-based models (Wang et al., 2014) . Zhang et al. (2016) used a Recurrent Neural Network (RNN) based approach to identify keyphrases in Twitter data. The model addresses the problem as sequence labeling for very short text, where a joint-layer RNN is used to capture the semantic dependencies in the input sequence. Alzaidy et al. (2019) employed a LSTM-CRF architecture to model keyphrase extraction as a sequence labelling task to learn the labels of the entire input sequence. Santosh et al. (2020) extended the LSTM-CRF to utilize BiLSTM and incorporated an attention mechanism to retrieve additional information from other sentences within the same document. Sahrawat et al. (2020) evaluated the effect of various pre-trained word embeddings for the BiLSTM-CRF architecture in extracting keyphrases from benchmark datasets and found contextual embeddings offered better performance. While these models offer better performance, they fail to capture the centrality of the keyphrases which represent a salient feature of the document. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 180, |
|
"text": "(Wang et al., 2014)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 202, |
|
"text": "Zhang et al. (2016)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 647, |
|
"text": "Santosh et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 810, |
|
"end": 832, |
|
"text": "Sahrawat et al. (2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The keyphrase extraction task is formulated as a sequence labelling task. Given a document X = w 1 , w 2 , \u2022 \u2022 \u2022 , w t where w i is the i th word and t is the number of words in the document, we predict the labels y = y 1 , y 2 , \u2022 \u2022 \u2022 , y t where each label y i is whether word w i is a keyphrase or not.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Each word in the document is represented by pre-trained low-dimensional vector representations. Any pre-trained vector representation can be used, and we experiment with various pre-trained embeddings such as GloVe (Pennington et al., 2014) , BERT (Devlin et al., 2019) and BioBERT (Lee et al., 2020) . The impact of each embedding type is discussed in the experiments section.", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 240, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 269, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 300, |
|
"text": "(Lee et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embedding Layer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This layer is used to encode each document to obtain the local contextual representation. A forward and backward LSTMs are used to read the input sequence from left to right,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2212 \u2192 h 1 , \u2212 \u2192 h 2 , \u2022 \u2022 \u2022 , \u2212 \u2192 h t , and right to left, \u2190 \u2212 h 1 , \u2190 \u2212 h 2 , \u2022 \u2022 \u2022 , \u2190 \u2212 h t , respectively", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": ". The outputs from the two directions are concatenated and summed for the final hidden state representation of the document, H", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "= [ t i=1 \u2212 \u2192 h i , t i=1 \u2190 \u2212 h i ].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM Layer", |
|
"sec_num": "3.2" |
|
}, |
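A minimal sketch of this document-vector construction, using plain NumPy; the shapes, array names, and random inputs are illustrative assumptions rather than the authors' released code.

```python
import numpy as np

# Assumed shapes: t tokens, hidden size d per direction.
t, d = 12, 64
fwd = np.random.randn(t, d)  # forward LSTM outputs  h_1 ... h_t
bwd = np.random.randn(t, d)  # backward LSTM outputs h_1 ... h_t

# Sum each direction over time steps, then concatenate:
# H = [sum_i fwd_i, sum_i bwd_i], a 2d-dimensional document vector.
H = np.concatenate([fwd.sum(axis=0), bwd.sum(axis=0)])
assert H.shape == (2 * d,)
```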
|
{ |
|
"text": "Sequence labelling is commonly used for other token encoding tasks such as Named Entity Recognition (NER) where the task is to determine whether a token is a named entity or not. However, keyphrase extraction is different from other sequence labelling tasks (for example NER) in that the tokens should capture the main gist of the document. This is in contrast to NER where the importance of the token is irrelevant as long as it is a named entity. To incorporate the idea of centrality, we use the similarity between each token and the document embedding, H, to bias the model towards tokens which are central (i.e., similar) to the document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Centrality Weighting Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "For words {w 1 , w 2 , \u2022 \u2022 \u2022 , w t } in a document D, we compute the centrality weight for each word \u03b1 1 , \u03b1 2 , \u2022 \u2022 \u2022 , \u03b1 t . Each \u03b1 i is calculated as the cosine similarity between the document vector (H) and each word (w i ). This is then used to weight the document vector when concatenating with each word's representation from the BiLSTM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Centrality Weighting Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The output representation, z i for each word is then the centrality weight, \u03b1 i multiplied by the output of the biLSTM,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Centrality Weighting Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "z i = [\u03b1 i \u2212 \u2192 h i , \u03b1 i \u2190 \u2212 h i ].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Centrality Weighting Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "A dense layer is then used to transform the output representation, k i = f (z i ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Centrality Weighting Layer", |
|
"sec_num": "3.3" |
|
}, |
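A hedged sketch of the centrality weighting, following the $z_i$ formula above; the cosine helper, the random dense-layer parameters, and the use of the concatenated BiLSTM states as the per-word representations are illustrative assumptions.

```python
import numpy as np

def cosine(a, b):
    # Cosine similarity between two vectors.
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8))

t, d = 12, 64
fwd = np.random.randn(t, d)                    # forward hidden states
bwd = np.random.randn(t, d)                    # backward hidden states
H = np.concatenate([fwd.sum(0), bwd.sum(0)])   # document vector, shape (2d,)
words = np.concatenate([fwd, bwd], axis=1)     # per-word representations, (t, 2d)

# alpha_i: cosine similarity between each word and the document vector.
alpha = np.array([cosine(words[i], H) for i in range(t)])

# z_i = [alpha_i * fwd_i, alpha_i * bwd_i]; then a dense layer k_i = f(z_i).
z = alpha[:, None] * words                     # (t, 2d)
W, b = np.random.randn(2 * d, d), np.zeros(d)  # dense layer (random for the sketch)
k = np.tanh(z @ W + b)                         # (t, d)
```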
|
{ |
|
"text": "The obtained contextual representations of each word, k i are given as input sequence to a CRF layer. CRFs are widely used to model sequence labeling tasks (Lafferty et al., 2001) . Given the input document as sequence of tokens, CRF produces a probability distribution over the output label sequence using the dependencies among the labels of the entire input sequence. This formulation considers the correlations between neighboring labels and allows joint decoding for the best sequence of ) of the keyphrase, the model can learn a multi-token keyphrase. As an example, given a sentence with five tokens (t 1 , t 2 , t 3 , t 4 t 5 ) of which two (t 2 , t 3 ) are part of a keyphrase, the label would be represented as (t O , t B , t I , t O , t O ). Figure 2 illustrates our model architecture with the various layers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 179, |
|
"text": "(Lafferty et al., 2001)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
|
{ |
|
"start": 753, |
|
"end": 761, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields (CRF)", |
|
"sec_num": "3.4" |
|
}, |
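The B/I/O scheme above can be made concrete with a small decoding helper; the function below is a hypothetical illustration, not part of the paper's code.

```python
def decode_keyphrases(tokens, labels):
    """Recover multi-token keyphrases from a B/I/O label sequence."""
    phrases, current = [], []
    for tok, lab in zip(tokens, labels):
        if lab == "B":                 # a new keyphrase starts here
            if current:
                phrases.append(" ".join(current))
            current = [tok]
        elif lab == "I" and current:   # continue the current keyphrase
            current.append(tok)
        else:                          # "O" (or a stray "I") closes any open phrase
            if current:
                phrases.append(" ".join(current))
            current = []
    if current:
        phrases.append(" ".join(current))
    return phrases

# The five-token example from the text: labels (t_O, t_B, t_I, t_O, t_O).
print(decode_keyphrases(["t1", "t2", "t3", "t4", "t5"],
                        ["O", "B", "I", "O", "O"]))  # ['t2 t3']
```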
|
{ |
|
"text": "Datasets. We ran our experiment on 2 publicly available keyphrase datasets: PubMed (Gero and Ho, 2019) and INSPEC (Hulth, 2003) . PubMed consists of 2532 articles from PubMed Central Open Access Subset with at least 5 author-provided keyphrases while INSPEC contains 200 abstracts of scientific journal papers from Computer Science collected between the years 1998 and 2002. Each document in INSPEC has two sets of keywords assigned: the controlled keywords, which are manually controlled assigned keywords that appear in the Inspec thesaurus but may not appear in the document, and the uncontrolled keywords which are freely assigned by the editors. The union of both sets is considered as the ground-truth in this work. Summary statistics for the datasets are shown in Table 1 . Since we use a sequence labeling formulation of the keyphrase extraction problem, the abstract/keyphrases data pairs are prepared such that each document is a sequence of word tokens, each with positive labels if it occurs in a keyphrase (k B , k I ), or with a negative label (k O ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 102, |
|
"text": "(Gero and Ho, 2019)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 127, |
|
"text": "(Hulth, 2003)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 771, |
|
"end": 778, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Experiment Settings. As baseline models, we train BiLSTM and BiLSTM-CRF with 100dimension Glove pre-trained embedding vectors (Pennington et al., 2014) . We also train BiLSTM-CRF with two 768-dimension contextual embeddings, BERT (Devlin et al., 2019) and BioBERT (Lee et al., 2020) . DAKE (Santosh et al., 2020) , a state-of-the art baseline, uses a sentence enrich- ing process from all the documents using sentence embedding. To replicate their work, we used the BERT model to extract sentence embeddings for each document and enrich the representation. Finally, our model is trained using BERT word embeddings for the INSPEC dataset and BioBERT embeddings for the PubMed dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 151, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 251, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 282, |
|
"text": "(Lee et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 312, |
|
"text": "(Santosh et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The results reported are from three runs using 80/20/20 split for train/val/test sets respectively. The BiLSTM, and BiLSTM-CRF are optimized during training using stochastic gradient descent with the learning rate 0.0001. Gradient clipping and drop-out are used to prevent overflow and overfitting. We select the model with the best F1 score on the validation set over three runs. The final test scores reported are the averages running the best model on the test sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The code was implemented in Tensorflow 2.4.1 and the code is available at https://github.com/ZHgero/ keyphrases_centrality.git.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The performance comparisons between the baselines and our model are shown in Table 2 . Our model performs significantly better on the PubMed dataset compared to the existing baselines. In particular, the results show the impact of the centrality layer as it provides a boost in AUC of 0.02 from BiLSTM-CRF (BioBERT) to our model. The improvement gained from our model is not as large on the INSPEC dataset. We hypothesize that for the centrality constraint to be effective, the input sequence should be relatively longer. The sentences in the INSPEC dataset are much shorter hence the difficulty in learning the central theme.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 84, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We also compared our models with several state-of-the-art unsupervised approaches including SingleRank (Litvak and Last, 2008) , Position-Rank (Florescu and Caragea, 2017) , TopicRank (Bougouin et al., 2013) , and SIFRank (Sun et al., Table 3 presents the comparison on the PubMed dataset. Since the unsupervised methods are ranking-based methods, the performances are evaluated in terms of F1-measure when a fixed number of keyphrases are extracted. To convert our model into a ranking model, we compute the probability for the predicted keyphrases by using an independence assumption after calculating the marginal probabilities from the CRF layer. The results illustrate that our model outperforms previous unsupervised methods by a significant margin.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 126, |
|
"text": "(Litvak and Last, 2008)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 171, |
|
"text": "(Florescu and Caragea, 2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 207, |
|
"text": "(Bougouin et al., 2013)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 234, |
|
"text": "(Sun et al.,", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 242, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
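A sketch of the ranking conversion described above: under the independence assumption, a candidate phrase's score is the product of its per-token marginals from the CRF. The marginal values and span indices here are made up for illustration.

```python
import numpy as np

def phrase_score(marginals, start, end):
    # Product of per-token keyphrase marginals over the span [start, end).
    return float(np.prod(marginals[start:end]))

# marginals[i] ~ P(token i is part of a keyphrase), e.g. from CRF marginals.
marginals = np.array([0.1, 0.9, 0.8, 0.2, 0.6])
candidates = [(1, 3), (4, 5)]  # token spans of predicted keyphrases
ranked = sorted(candidates, key=lambda s: phrase_score(marginals, *s), reverse=True)
print(ranked)  # [(1, 3), (4, 5)] -- scores 0.72 and 0.60
```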
|
{ |
|
"text": "In Figure 3 , we compare keyphrases tagged by the BioBERT model and our model on a sample abstract. The true positives are colored blue while false negatives are in red. We observe that the BioBERT model fails to identify 'chronic thromboembolic pulmonary hypertension' as an important keyphrase whereas our model correctly identifies it. This may be due to the single occurrence of 'pulmonary hypertension' in the input text. Meanwhile our model leverages the document embedding to 'understand' that pulmonary hypertension is semantically relevant in the context of the entire abstract. We also observe a similar pattern with the keyphrase 'duration of anticoagulation'. Even though both models fail to capture the entire phrase, our model identifies 'anticoagulation' as a strong candidate because of its semantic meaning in the context of the whole abstract.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The figure also illustrates the limitation of the models as both struggle with common words such as 'post' and 'high' that are attached as prefixes to important keywords. 'High risk', 'duration of' and 'post-' are considered unimportant by both models. This can be explained by the fact that such words usually occur outside a keyphrase boundary and get overlooked even when they appear with important words. False positives by both models are important terms as the phrases are very relevant in the context of abstract but were not selected by the authors. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we proposed a keyphrase extraction method that focuses on identifying words which are central to the document semantics. The problem of keyphrase extraction is posed as a sequence labeling task where each token is tagged as either a keyphrase or not. In addition to our novel centrality constraint layer, we have used Bi-LSTM layers to capture the long term dependencies among the input sequences. Finally, we have a CRF layer which is well suited to capture the dependencies from the output labels. Empirical results on two datasets show that our method gains significant improvement in the PubMed dataset while performing slightly better on the INSPEC dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by funding from the National Institute of Health awards #5K01LM012924 and #1R01LM013323.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Bi-lstm-crf sequence labeling for keyphrase extraction from scholarly documents", |
|
"authors": [ |
|
{ |
|
"first": "Rabah", |
|
"middle": [], |
|
"last": "Alzaidy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cornelia", |
|
"middle": [], |
|
"last": "Caragea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C. Lee", |
|
"middle": [], |
|
"last": "Giles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The World Wide Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2551--2557", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3308558.3313642" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rabah Alzaidy, Cornelia Caragea, and C. Lee Giles. 2019. Bi-lstm-crf sequence labeling for keyphrase extraction from scholarly documents. In The World Wide Web Conference, WWW 2019, San Francisco, CA, USA, May 13-17, 2019, pages 2551-2557.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Using noun phrase heads to extract document keyphrases", |
|
"authors": [ |
|
{ |
|
"first": "Ken", |
|
"middle": [], |
|
"last": "Barker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadia", |
|
"middle": [], |
|
"last": "Cornacchia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "conference of the canadian society for computational studies of intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ken Barker and Nadia Cornacchia. 2000. Using noun phrase heads to extract document keyphrases. In conference of the canadian society for computa- tional studies of intelligence, pages 40-52. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automatic keyword extraction for text summarization: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Kumar", |
|
"middle": [], |
|
"last": "Santosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Korra Sathya", |
|
"middle": [], |
|
"last": "Bharti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Babu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.03242" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Santosh Kumar Bharti and Korra Sathya Babu. 2017. Automatic keyword extraction for text summariza- tion: A survey. arXiv preprint arXiv:1704.03242.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "TopicRank: Graph-based topic ranking for keyphrase extraction", |
|
"authors": [ |
|
{ |
|
"first": "Adrien", |
|
"middle": [], |
|
"last": "Bougouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Boudin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B\u00e9atrice", |
|
"middle": [], |
|
"last": "Daille", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Sixth International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "543--551", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adrien Bougouin, Florian Boudin, and B\u00e9atrice Daille. 2013. TopicRank: Graph-based topic ranking for keyphrase extraction. In Proceedings of the Sixth In- ternational Joint Conference on Natural Language Processing, pages 543-551.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Statistical identification of key phrases for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Frans", |
|
"middle": [], |
|
"last": "Coenen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Leng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Sanderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanbo J", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "International Workshop on Machine Learning and Data Mining in Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "838--853", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frans Coenen, Paul Leng, Robert Sanderson, and Yanbo J Wang. 2007. Statistical identification of key phrases for text classification. In International Work- shop on Machine Learning and Data Mining in Pat- tern Recognition, pages 838-853. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Position-Rank: An unsupervised approach to keyphrase extraction from scholarly documents", |
|
"authors": [ |
|
{ |
|
"first": "Corina", |
|
"middle": [], |
|
"last": "Florescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cornelia", |
|
"middle": [], |
|
"last": "Caragea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1105--1115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corina Florescu and Cornelia Caragea. 2017. Position- Rank: An unsupervised approach to keyphrase ex- traction from scholarly documents. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1105-1115.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Namedkeys: Unsupervised keyphrase extraction for biomedical documents", |
|
"authors": [ |
|
{ |
|
"first": "Zelalem", |
|
"middle": [], |
|
"last": "Gero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Joyce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 10th ACM International Conference on Bioinformatics, Computational Biology and Health Informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--337", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zelalem Gero and Joyce C Ho. 2019. Namedkeys: Unsupervised keyphrase extraction for biomedical documents. In Proceedings of the 10th ACM In- ternational Conference on Bioinformatics, Compu- tational Biology and Health Informatics, pages 328- 337.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Extracting keyphrases from research papers using citation networks", |
|
"authors": [ |
|
{ |
|
"first": "Cornelia", |
|
"middle": [], |
|
"last": "Sujatha Das Gollapalli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Caragea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1629--1635", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sujatha Das Gollapalli and Cornelia Caragea. 2014. Extracting keyphrases from research papers using citation networks. In Proceedings of the Twenty- Eighth AAAI Conference on Artificial Intelligence, July 27 -31, 2014, Qu\u00e9bec City, Qu\u00e9bec, Canada, pages 1629-1635.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Incorporating expert knowledge into keyphrase extraction", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoli", |
|
"middle": [], |
|
"last": "Sujatha Das Gollapalli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3180--3187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sujatha Das Gollapalli, Xiaoli Li, and Peng Yang. 2017. Incorporating expert knowledge into keyphrase ex- traction. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence, February 4-9, 2017, San Francisco, California, USA, pages 3180- 3187.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Extracting key terms from noisy and multitheme documents", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Grineva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Grinev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Lizorkin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 18th International Conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "661--670", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1526709.1526798" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria P. Grineva, Maxim N. Grinev, and Dmitry Li- zorkin. 2009. Extracting key terms from noisy and multitheme documents. In Proceedings of the 18th International Conference on World Wide Web, WWW 2009, Madrid, Spain, April 20-24, 2009, pages 661- 670.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Corephrase: Keyphrase extraction for document clustering", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Khaled", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Hammouda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Matute", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kamel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "International workshop on machine learning and data mining in pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "265--274", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Khaled M Hammouda, Diego N Matute, and Mo- hamed S Kamel. 2005. Corephrase: Keyphrase ex- traction for document clustering. In International workshop on machine learning and data mining in pattern recognition, pages 265-274. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Improved automatic keyword extraction given more linguistic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Anette", |
|
"middle": [], |
|
"last": "Hulth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 2003 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "216--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anette Hulth. 2003. Improved automatic keyword ex- traction given more linguistic knowledge. In Pro- ceedings of the 2003 Conference on Empirical Meth- ods in Natural Language Processing, pages 216- 223.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A ranking approach to keyphrase extraction", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunhua", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 32nd international ACM SIGIR conference on Research and development in information retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "756--757", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Jiang, Yunhua Hu, and Hang Li. 2009. A ranking approach to keyphrase extraction. In Proceedings of the 32nd international ACM SIGIR conference on Research and development in information retrieval, pages 756-757.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Phrasier: a system for interactive document retrieval using keyphrases", |
|
"authors": [ |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Staveley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the 22nd annual international ACM SIGIR conference on Research and development in information retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steve Jones and Mark S Staveley. 1999. Phrasier: a system for interactive document retrieval using keyphrases. In Proceedings of the 22nd annual in- ternational ACM SIGIR conference on Research and development in information retrieval, pages 160- 167.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Re-examining automatic keyphrase extraction approaches in scientific articles", |
|
"authors": [ |
|
{ |
|
"first": "Nam", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Workshop on Multiword Expressions: Identification, Interpretation, Disambiguation and Applications (MWE 2009)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Nam Kim and Min-Yen Kan. 2009. Re-examining automatic keyphrase extraction approaches in sci- entific articles. In Proceedings of the Workshop on Multiword Expressions: Identification, Inter- pretation, Disambiguation and Applications (MWE 2009), pages 9-16.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [ |
|
"C N" |
|
], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the Eighteenth International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional random fields: Probabilistic models for segmenting and labeling se- quence data. In Proceedings of the Eighteenth Inter- national Conference on Machine Learning (ICML 2001), Williams College, Williamstown, MA, USA, June 28 -July 1, 2001, pages 282-289.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyeon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Bioinformatics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1234--1240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomed- ical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Graph-based keyword extraction for single-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Litvak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Last", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Coling 2008: Proceedings of the workshop Multi-source Multilingual Information Extraction and Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marina Litvak and Mark Last. 2008. Graph-based keyword extraction for single-document summariza- tion. In Coling 2008: Proceedings of the work- shop Multi-source Multilingual Information Extrac- tion and Summarization, pages 17-24.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Unsupervised approaches for automatic keyword extraction using meeting transcripts", |
|
"authors": [ |
|
{ |
|
"first": "Feifan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deana", |
|
"middle": [], |
|
"last": "Pennell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of Human Language Technologies: The", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Feifan Liu, Deana Pennell, Fei Liu, and Yang Liu. 2009a. Unsupervised approaches for automatic key- word extraction using meeting transcripts. In Pro- ceedings of Human Language Technologies: The", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "620--628", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Conference of the North American Chapter of the Association for Computational Lin- guistics, pages 620-628.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Clustering to find exemplar terms for keyphrase extraction", |
|
"authors": [ |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yabin", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "257--266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiyuan Liu, Peng Li, Yabin Zheng, and Maosong Sun. 2009b. Clustering to find exemplar terms for keyphrase extraction. In Proceedings of the 2009 Conference on Empirical Methods in Natural Lan- guage Processing, pages 257-266.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Relevant words extraction method for recommendation system", |
|
"authors": [ |
|
{ |
|
"first": "Naw", |
|
"middle": [], |
|
"last": "Naw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ei", |
|
"middle": [ |
|
"Ei" |
|
], |
|
"last": "Hlaing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Bulletin of Electrical Engineering and Informatics", |
|
"volume": "2", |
|
"issue": "3", |
|
"pages": "169--176", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naw Naw and Ei Ei Hlaing. 2013. Relevant words ex- traction method for recommendation system. Bul- letin of Electrical Engineering and Informatics, 2(3):169-176.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Keyphrase extraction as sequence labeling using contextualized embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Dhruva", |
|
"middle": [], |
|
"last": "Sahrawat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Debanjan", |
|
"middle": [], |
|
"last": "Mahata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haimin", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mayank", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Agniv", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rakesh", |
|
"middle": [], |
|
"last": "Gosangi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Stent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaman", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rajiv Ratn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zimmermann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "European Conference on Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--335", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dhruva Sahrawat, Debanjan Mahata, Haimin Zhang, Mayank Kulkarni, Agniv Sharma, Rakesh Gosangi, Amanda Stent, Yaman Kumar, Rajiv Ratn Shah, and Roger Zimmermann. 2020. Keyphrase extraction as sequence labeling using contextualized embeddings. In European Conference on Information Retrieval, pages 328-335. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Dake: Document-level attention for keyphrase extraction", |
|
"authors": [], |
|
"year": null, |
|
"venue": "European Conference on Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "392--401", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tokala Yaswanth Sri Sai Santosh, Debarshi Kumar Sanyal, Plaban Kumar Bhowmick, and Partha Pra- tim Das. 2020. Dake: Document-level attention for keyphrase extraction. In European Conference on Information Retrieval, pages 392-401. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A keyphrase-based approach to text summarization for english and bengali documents", |
|
"authors": [ |
|
{ |
|
"first": "Kamal", |
|
"middle": [], |
|
"last": "Sarkar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Journal of Technology Diffusion (IJTD)", |
|
"volume": "5", |
|
"issue": "2", |
|
"pages": "28--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kamal Sarkar. 2014. A keyphrase-based approach to text summarization for english and bengali docu- ments. International Journal of Technology Diffu- sion (IJTD), 5(2):28-38.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Sifrank: A new baseline for unsupervised keyphrase extraction based on pre-trained language model", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hangping", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongwei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaoran", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Access", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "10896--10906", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Sun, Hangping Qiu, Yu Zheng, Zhongwei Wang, and Chaoran Zhang. 2020. Sifrank: A new base- line for unsupervised keyphrase extraction based on pre-trained language model. IEEE Access, 8:10896- 10906.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Salience rank: Efficient keyphrase extraction with topic modeling", |
|
"authors": [ |
|
{ |
|
"first": "Nedelina", |
|
"middle": [], |
|
"last": "Teneva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "530--535", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nedelina Teneva and Weiwei Cheng. 2017. Salience rank: Efficient keyphrase extraction with topic mod- eling. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 2: Short Papers), pages 530-535.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Multiword keyword recommendation system for online advertising", |
|
"authors": [ |
|
{ |
|
"first": "Stamatina", |
|
"middle": [], |
|
"last": "Thomaidou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michalis", |
|
"middle": [], |
|
"last": "Vazirgiannis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "International Conference on Advances in Social Networks Analysis and Mining, ASONAM 2011", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "423--427", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ASONAM.2011.70" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stamatina Thomaidou and Michalis Vazirgiannis. 2011. Multiword keyword recommendation system for on- line advertising. In International Conference on Advances in Social Networks Analysis and Min- ing, ASONAM 2011, Kaohsiung, Taiwan, 25-27 July 2011, pages 423-427.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Learning algorithms for keyphrase extraction", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Turney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Information retrieval", |
|
"volume": "2", |
|
"issue": "4", |
|
"pages": "303--336", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D Turney. 2000. Learning algorithms for keyphrase extraction. Information retrieval, 2(4):303-336.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Learning to extract keyphrases from text", |
|
"authors": [ |
|
{ |

"first": "Peter", |

"middle": [ |

"D" |

], |

"last": "Turney", |

"suffix": "" |

} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D Turney. 2002. Learning to extract keyphrases from text. arXiv preprint cs/0212013.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Single document keyphrase extraction using neighborhood knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianguo", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "AAAI", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "855--860", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaojun Wan and Jianguo Xiao. 2008. Single doc- ument keyphrase extraction using neighborhood knowledge. In AAAI, volume 8, pages 855-860.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Corpus-independent generic keyphrase extraction using word embedding vectors", |
|
"authors": [ |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Software Engineering Research Conference", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rui Wang, Wei Liu, and Chris McDonald. 2014. Corpus-independent generic keyphrase extraction using word embedding vectors. In Software Engi- neering Research Conference, volume 39, pages 1- 8.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Kea: Practical automated keyphrase extraction", |
|
"authors": [ |
|
{ |

"first": "Ian", |

"middle": [ |

"H" |

], |

"last": "Witten", |

"suffix": "" |

}, |

{ |

"first": "Gordon", |

"middle": [ |

"W" |

], |

"last": "Paynter", |

"suffix": "" |

}, |

{ |

"first": "Eibe", |

"middle": [], |

"last": "Frank", |

"suffix": "" |

}, |

{ |

"first": "Carl", |

"middle": [], |

"last": "Gutwin", |

"suffix": "" |

}, |

{ |

"first": "Craig", |

"middle": [ |

"G" |

], |

"last": "Nevill-Manning", |

"suffix": "" |

} |
|
], |
|
"year": 2005, |
|
"venue": "Design and Usability of Digital Libraries: Case Studies in the Asia Pacific", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "129--152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian H Witten, Gordon W Paynter, Eibe Frank, Carl Gutwin, and Craig G Nevill-Manning. 2005. Kea: Practical automated keyphrase extraction. In Design and Usability of Digital Libraries: Case Studies in the Asia Pacific, pages 129-152.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Finding advertising keywords on web pages", |
|
"authors": [ |
|
{ |

"first": "Wen-Tau", |

"middle": [], |

"last": "Yih", |

"suffix": "" |

}, |

{ |

"first": "Joshua", |

"middle": [], |

"last": "Goodman", |

"suffix": "" |

}, |

{ |

"first": "Vitor", |

"middle": [ |

"R" |

], |

"last": "Carvalho", |

"suffix": "" |

} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 15th international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "213--222", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1135777.1135813" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen-tau Yih, Joshua Goodman, and Vitor R. Carvalho. 2006. Finding advertising keywords on web pages. In Proceedings of the 15th international conference on World Wide Web, WWW 2006, Edinburgh, Scot- land, UK, May 23-26, 2006, pages 213-222.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Keyphrase extraction using deep recurrent neural networks on Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yeyun", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuanjing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "836--845", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Zhang, Yang Wang, Yeyun Gong, and Xuanjing Huang. 2016. Keyphrase extraction using deep re- current neural networks on Twitter. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 836-845.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Figure 1 shows a PubMed document with the authorspecified keyphrases highlighted in blue." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "An example document from PubMed with author-provided keyphrases in blue." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Our model architecture with the BiLSTM, centrality weighting, and CRF layer." |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Comparison of keyphrases tagged by two models. True positives are colored blue while false negatives are in red.Purple represents keys that are false positive." |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td>Dataset</td><td colspan=\"2\">PubMed INSPEC</td></tr><tr><td>Tot. documents</td><td>2532</td><td>500</td></tr><tr><td>Tot. # of tokens</td><td>654389</td><td>67200</td></tr><tr><td>Tot. # of keyphrases</td><td>31871</td><td>4912</td></tr><tr><td>Avg. # of keyphrases</td><td>12.5</td><td>9.8</td></tr><tr><td colspan=\"3\">labels for the input sequence, rather than decoding</td></tr><tr><td colspan=\"3\">each label independently. Moreover, by utilizing</td></tr><tr><td colspan=\"3\">two different labels for the keyphrase to denote</td></tr><tr><td colspan=\"3\">the beginning (t B ) and intermediate part (t I</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "Datasets used for experiments" |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td>Model</td><td colspan=\"2\">PubMed INSPEC</td></tr><tr><td>BiLSTM (GloVe)</td><td>0.543</td><td>0.427</td></tr><tr><td>BiLSTM-CRF (GloVe)</td><td>0.554</td><td>0.453</td></tr><tr><td>BiLSTM-CRF (BERT)</td><td>0.604</td><td>0.581</td></tr><tr><td>BiLSTM-CRF (BioBERT)</td><td>0.622</td><td>0.464</td></tr><tr><td>DAKE</td><td>0.623</td><td>0.463</td></tr><tr><td>Ours</td><td>0.644</td><td>0.586</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "Model performance on different datasets" |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">F1@5 F1@10 F1@15</td></tr><tr><td>SingleRank</td><td>15.2</td><td>16.3</td><td>19.2</td></tr><tr><td colspan=\"2\">PositionRank 18.3</td><td>18.3</td><td>20.9</td></tr><tr><td>TopicRank</td><td>26.4</td><td>28.7</td><td>29.2</td></tr><tr><td>SIFRank</td><td>32.3</td><td>48.4</td><td>56.2</td></tr><tr><td>Ours</td><td>34.8</td><td>53.1</td><td>62.6</td></tr><tr><td>2020).</td><td/><td/><td/></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "Ranking comparison on the PubMed dataset" |
|
} |
|
} |
|
} |
|
} |