|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:44:24.348713Z" |
|
}, |
|
"title": "Synthetic vs. Real Reference Strings for Citation Parsing, and the Importance of Re-training and Out-Of-Sample Data for Meaningful Evaluations: Experiments with GROBID, GIANT and CORA", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Grennan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Trinity College Dublin", |
|
"location": { |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Siegen", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Citation parsing, particularly with deep neural networks, suffers from a lack of training data as available datasets typically contain only a few thousand training instances. Manually labelling citation strings is very timeconsuming, hence, synthetically created training data could be a solution. However, as of now, it is unknown if synthetically created reference-strings are suitable to train machine learning algorithms for citation parsing. To find out, we train Grobid, which uses Conditional Random Fields, with a) humanlabelled reference strings from 'real' bibliographies and b) synthetically created reference strings from the GIANT dataset. We find 1 that both synthetic and organic reference strings are equally suited for training Grobid (F1 = 0.74). We additionally find that retraining Grobid has a notable impact on its performance, for both synthetic and real data (+30% in F1). Having as many types of labelled fields as possible during training also improves effectiveness, even if these fields are not available in the evaluation data (+13.5% F1). We conclude that synthetic data is suitable for training (deep) citation parsing models. We further suggest that in future evaluations of reference parsing tools, both evaluation data being similar and data being dissimilar to the training data should be used to obtain more meaningful results.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Citation parsing, particularly with deep neural networks, suffers from a lack of training data as available datasets typically contain only a few thousand training instances. Manually labelling citation strings is very timeconsuming, hence, synthetically created training data could be a solution. However, as of now, it is unknown if synthetically created reference-strings are suitable to train machine learning algorithms for citation parsing. To find out, we train Grobid, which uses Conditional Random Fields, with a) humanlabelled reference strings from 'real' bibliographies and b) synthetically created reference strings from the GIANT dataset. We find 1 that both synthetic and organic reference strings are equally suited for training Grobid (F1 = 0.74). We additionally find that retraining Grobid has a notable impact on its performance, for both synthetic and real data (+30% in F1). Having as many types of labelled fields as possible during training also improves effectiveness, even if these fields are not available in the evaluation data (+13.5% F1). We conclude that synthetic data is suitable for training (deep) citation parsing models. We further suggest that in future evaluations of reference parsing tools, both evaluation data being similar and data being dissimilar to the training data should be used to obtain more meaningful results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Accurate citation data is needed by publishers, academic search engines, citation & research-paper recommender systems and others to calculate impact metrics (Nisa Bakkalbasi et al., 2006; Jacso, 2008) , rank search results (Beel and Gipp, 2009a,b) , generate recommendations (Beel et al., 2016; Eto, 2019; F\u00e4rber et al., 2018; F\u00e4rber and Jatowt, 2020; Jia and Saule, 2018; Livne et al., 2014) and other applications e.g. in the field of bibliometric-enhanced information retrieval (Cabanac et al., 2020) . Citation data is often parsed from unstructured bibliographies found in PDF files on the Web (Figure 1 ). To facilitate the parsing process, a dozen (Tkaczyk et al., 2018a) open source tools were developed including ParsCit (Councill et al., 2008) , Grobid (Lopez, 2009 (Lopez, , 2013 , and Cermine (Tkaczyk et al., 2015) . Grobid is typically considered the most effective one (Tkaczyk et al., 2018a) . There is ongoing research that continuously leads to novel citation-parsing algorithms including deep learning algorithms Bhardwaj et al., 2017; Nasar et al., 2018; Prasad et al., 2018; Rizvi et al., 2019; Rodrigues Alves et al., 2018; Zhang, 2018) and meta-learned ensembles (Tkaczyk et al., 2018c,b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 188, |
|
"text": "(Nisa Bakkalbasi et al., 2006;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 201, |
|
"text": "Jacso, 2008)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 248, |
|
"text": "(Beel and Gipp, 2009a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 295, |
|
"text": "(Beel et al., 2016;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 306, |
|
"text": "Eto, 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 327, |
|
"text": "F\u00e4rber et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 352, |
|
"text": "F\u00e4rber and Jatowt, 2020;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 373, |
|
"text": "Jia and Saule, 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 393, |
|
"text": "Livne et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 504, |
|
"text": "(Cabanac et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 656, |
|
"end": 679, |
|
"text": "(Tkaczyk et al., 2018a)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 731, |
|
"end": 754, |
|
"text": "(Councill et al., 2008)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 764, |
|
"end": 776, |
|
"text": "(Lopez, 2009", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 777, |
|
"end": 791, |
|
"text": "(Lopez, , 2013", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 806, |
|
"end": 828, |
|
"text": "(Tkaczyk et al., 2015)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 885, |
|
"end": 908, |
|
"text": "(Tkaczyk et al., 2018a)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1033, |
|
"end": 1055, |
|
"text": "Bhardwaj et al., 2017;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1056, |
|
"end": 1075, |
|
"text": "Nasar et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1076, |
|
"end": 1096, |
|
"text": "Prasad et al., 2018;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1097, |
|
"end": 1116, |
|
"text": "Rizvi et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1117, |
|
"end": 1146, |
|
"text": "Rodrigues Alves et al., 2018;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1147, |
|
"end": 1159, |
|
"text": "Zhang, 2018)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1187, |
|
"end": 1212, |
|
"text": "(Tkaczyk et al., 2018c,b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 600, |
|
"end": 609, |
|
"text": "(Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most parsing tools apply supervised machine learning (Tkaczyk et al., 2018a) and require labelled training data. However, training data is rare compared to other disciplines where datasets may have millions of instances. To the best of our knowledge, existing citation-parsing datasets typically contain a few thousand instances and are domain specific ( Figure 2 ). This may be sufficient for traditional machine learning algorithms but not for deep learning, which shows a lot of potential for citation parsing Bhardwaj et al., 2017; Nasar et al., 2018; Prasad et al., 2018; Rizvi et al., 2019) . Even for traditional machine learning, existing datasets may not be ideal as they often lack diversity in terms of citation styles.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 76, |
|
"text": "(Tkaczyk et al., 2018a)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 535, |
|
"text": "Bhardwaj et al., 2017;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 555, |
|
"text": "Nasar et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 576, |
|
"text": "Prasad et al., 2018;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 577, |
|
"end": 596, |
|
"text": "Rizvi et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 355, |
|
"end": 363, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, we published GIANT, a synthetic dataset with nearly 1 billion annotated reference strings (Grennan et al., 2019) . More precisely, the dataset contains 677,000 unique reference strings, each in around 1,500 citation styles (e.g. APA, Figure 1 : Illustration of a 'Bibliography' with four 'Reference Strings', each with a number of 'Fields'. A reference parser receives a reference string as input, and outputs labelled fields, e.g. authors=\"C. Lemke ...\"; title=\"Metalearning: a survey ...\"; ...", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 122, |
|
"text": "(Grennan et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 252, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": ". The dataset was synthetically created. This means, the reference strings are not 'real' reference strings extracted from 'real' bibliographies. Instead, we downloaded 677,000 references in XML format from CrossRef, and used Citeproc-JS (Frank G. Bennett, 2011) with 1,500 citation styles to convert the 677,000 references into a total of 1 billion annotated citation strings (1,500 * 677,000). 2 We wonder how suitable a synthetic dataset like GIANT is to train machine learning models for citation parsing. Therefore, we pursue the following research question:", |
|
"cite_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 397, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harvard, ACM)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1. How will citation parsing perform when trained on synthetic reference strings, compared to being trained on real reference strings?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harvard, ACM)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Potentially, synthetic data could lead to higher citation parsing performance, as synthetic datasets may contain more data and more diverse data (more citation styles). Synthetic datasets like GIANT could potentially also advance (deep) citation parsing, which currently suffers from a lack of 'real' annotated bibliographies at large scale.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harvard, ACM)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In addition to the above research question, we aimed to answer the following questions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harvard, ACM)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2. To what extent does citation-parsing (based on machine learning) depend on the amount of training data?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harvard, ACM)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3. How important is re-training a citation parser for the specific data it should be used on? Or, in other words, how does performance vary if the test data differs (not) from the training data?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harvard, ACM)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Is it important to have many different fields (author, year, . . . ) for training, even if the fields are not available in the final data?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We are aware of eleven datasets ( Figure 2 ) with annotated reference strings. The most popular ones are probably Cora and CiteSeer. Researchers also often use variations of PubMed. Several datasets are from the same authors, and many datasets include data from other datasets. For instance, the Grobid dataset is based on some data from Cora, PubMed, and others (Lopez, 2020) . New data is continuously added to Grobid's dataset. As such, there is not \"the one\" Grobid dataset. GIANT (Grennan et al., 2019) is the largest and most diverse dataset in terms of citation styles, but GIANT is, as mentioned, synthetically created.", |
|
"cite_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 376, |
|
"text": "(Lopez, 2020)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 485, |
|
"end": 507, |
|
"text": "(Grennan et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 42, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Cora is one of the most widely used datasets but has potential shortcomings (Anzaroot and Mc-Callum, 2013; Councill et al., 2008; Prasad et al., 2018) . Cora is homogeneous with citation strings only from Computer Science. It is relatively small and only has labels for \"coarse-grained fields\" (Anzaroot and McCallum, 2013) . For example, the author field does not label each author separately. Prasad et al. conclude that a \"shortcoming of [citation parsing research] is that the evaluations have been largely limited to the Cora dataset, which is [...] unrepresentative of the multilingual, multidisciplinary scholastic reality\" (Prasad et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 106, |
|
"text": "(Anzaroot and Mc-Callum, 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 129, |
|
"text": "Councill et al., 2008;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 150, |
|
"text": "Prasad et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 323, |
|
"text": "(Anzaroot and McCallum, 2013)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 549, |
|
"end": 554, |
|
"text": "[...]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 631, |
|
"end": 652, |
|
"text": "(Prasad et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To compare the effectiveness of synthetic vs. real bibliographies, we used Grobid. Grobid is the most effective citation parsing tool (Tkaczyk et al., 2018a) and the most easy to use tool based on our experience. Grobid uses conditional random fields (CRF) as machine learning algorithm. Of course, in the long-run, it would be good to conduct our experiments with different machine learning algorithms, particularly deep learning algorithms, but for now we concentrate on one tool and algorithm. Given that all major citation-parsing tools -including Grobid, Cermine and ParsCit -use CRF we consider this sufficient for an initial experiment. Also, we attempted to re-train Neural ParsCit (Prasad et al., 2018) but failed doing so, which indicates that the ease-of-use of the rather new deep-learning methods is not yet as advanced as the established citation parsing tools like Grobid.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 157, |
|
"text": "(Tkaczyk et al., 2018a)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 711, |
|
"text": "(Prasad et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We trained Grobid, the CRF respectively, on two datasets. Train Grobid denotes a model trained on 70% (5,460 instances) of the dataset that Grobid uses to train its out-of-the box version. We slightly modified the dataset, i.e. we removed labels for 'pubPlace', 'note' and 'institution' as this information is not contained in GIANT, and hence a model trained on GIANT could not identify these labels 3 . Train GIANT denotes the model trained on a random sample (5,460 instances) of GIANT's 991,411,100 labeled reference strings. Our expectation was that both models would perform similar, or, ideally, Train GIANT would even outperform Train Grobid .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To analyze how the amount of training data affects performance, we additionally trained Train GIANT , on 1k, 3k, 5k, 10k, 20k, and 40k instances of GIANT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We evaluated all models on four datasets. Eval Grobid comprises of the remaining 30% of Grobid's dataset (2,340 reference strings). Eval Cora denotes the Cora dataset, which comprises, after some cleaning, of 1,148 labelled reference strings from the computer science domain. Eval GIANT comprises of 5,000 random reference strings from GIANT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "These three evaluation datasets are potentially not ideal as evaluations are likely biased towards one of the two trained models. Evaluating the models on Eval GIANT likely favors Train GIANT since the data for both Train GIANT and Eval GIANT is highly similar, i.e. it originates from the same dataset. Similarly, evaluating the models on Eval Grobid likely favors Train Grobid as Train Grobid was trained on 70% of the original Grobid dataset and this 70% of the data is highly similar to the remaining 30% that we used for the evaluation. Also, the Cora dataset is somewhat biased, because Grobid's dataset contains parts of Cora. We therefore created another evaluation dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Eval WebPDF is our 'unbiased' dataset with 300 manually annotated citation strings from PDFs that we collected from the Web. To create Eval WebPDF , we chose twenty different words from the homepages of some universities 4 . Then, we used each of the twenty words as a search term in Google Scholar. From each of these searches, we downloaded the first four available PDFs. Of each PDF, we randomly chose four citation strings. This gave approximately sixteen citation strings for each of the twenty keywords. In total, we obtained 300 citation strings. We consider this dataset to be a realistic, though relatively small, dataset for citation parsing in the context of a web-based academic search engine or recommender system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We measure performance of all models with precision, recall, F1 (Micro Average) and F1 (Macro Average) on both field level and token level. We only report 'F1 Macro Average on field level' as all metrics led to similar results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "All source code, data (including the WebPDF dataset), images, and an Excel sheet with all results (including precision and recall and token level results) is available on GitHub https://github.com/BeelGroup/GIANT-The-1-Billion-Annotated-Synthetic-Bibliographic-Reference-String-Dataset/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The models trained on Grobid (Train Grobid ) and GIANT (Train GIANT ) perform as expected when evaluated on the three 'biased' datasets Eval Grobid , Eval Cora and Eval GIANT (Figure 3) While these results generally might not be surprising, they imply that both synthetic and real data lead to very similar results and 'behave' similarly when used to train models that are evaluated on data being (not) similar to the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 185, |
|
"text": "(Figure 3)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Also interesting is the evaluation on the WebPDF dataset. The model trained on synthetic data (Train GIANT ) and the model trained on real data (Train Grobid ) perform alike with an F1 of 0.74 each (Figure 3 ) 5 . In other words, synthetic and humanlabelled data perform equally well for training our machine learning models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 211, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 207, |
|
"text": "(Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Looking at the data in more detail reveals that some fields are easier to parse than others ( Figure 4 ). For instance, the 'date' field (i.e. year of publication) has a constantly high F1 across all models and evaluation datasets (min=0.86; max=1.0). The 'author' field also has a high F1 throughout all experiments (min=0.75; max=0.99). In contrast, parsing 'booktitle' and 'publisher' seems to strongly benefit from training based on samples similar to the evaluation data. When evaluation and training data is highly similar (e.g. Train GIANT -Eval GIANT or Train Grobid -Eval Grobid ), F1 is relatively high (typically above 0.7). If the evaluation data is different (e.g. Train GIANT -Eval Grobid ), F1 is low (0.15 and 0.16 for Train Grobid and Train GIANT respectively on Eval WebPDF ). The difference in F1 for parsing the book-title is around factor 6.5, with an F1 of 0.97 (Train Grobid ) and 0.15 respectively (Train GIANT ) when evaluated on Eval Grobid .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 103, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Similarly, F1 for parsing the book-title on Eval GIANT differs by around factor 3 with an F1 of 0.75 (Train GIANT ) and 0.27 (Train Grobid ) respectively. While it is well known, and quite intuitive, that different fields are differently difficult to parse, we are first to show that field accuracy varies for different fields differently depending on whether or not the model was trained on data (not) being similar to the evaluation data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In a side experiment, we trained a new model Train Grobid + with additional labels for institution, note and pubPlace (those we removed for the other experiments). Train Grobid + outperformed Train Grobid notably with an F1 of 0.84 vs. 0.74 (+13.5%) when evaluated on Eval WebPDF . This indicates that the more fields are available for training, the better the parsing of all fields becomes even if the additional fields are not in the evaluation data. This finding seems plausible to us and confirms statements by Anzaroot and McCallum but, to the best of our knowledge, we are first to quantify the benefit. It is worth noting that citation parsers do not always use the same fields ( Figure 6 ). For instance, Cermine extracts relatively few fields, but is one of few tools extracting the DOI field.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 687, |
|
"end": 695, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our assumption that more training data would generally lead to better parsing performance -and hence GIANT could be useful for training standard machine learning algorithms -was not confirmed. Increasing training data from 1,000 to 10,000 instances improved F1 by 6% on average over the four evaluation datasets ( Figure 5 ). More precisely, increasing data from 1,000 to 3,000 instances improved F1, on average, by 2.4%; Increasing from 3,000 to 5,000 instances improved F1 by another 2%; Increasing further to 10,000 instances improved F1 by another 1.6%. However, increasing to 20,000 or 40,000 instances leads to no notable improvement, and in some cases even to a decline in F1 ( Figure 5 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 322, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 685, |
|
"end": 693, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In summary, both models -one trained on synthetic data (GIANT) and one trained on 'real' humanannotated reference strings (Grobid) -performed very similar. On the main evaluation dataset (WebPDF) both models achieved an F1 of 0.74. Similarly, if a model was evaluated on data different from its training data, F1 was between 0.6 and 0.7. If a model was evaluated on data similar to the training data, F1 was above 0.9 (+30%). F1 only increased up to a training size of around 10,000 instances (+6% compared to 1,000 instances). Additional fields (e.g. pubplace) in the training data increased F1 notably (+13.5%), even if these additional fields were not in the evaluation data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "These results lead us to the following conclusions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "First, there seems to be little benefit in using synthetic data (e.g. GIANT (Grennan et al., 2019) ) for training traditional machine learning models (i.e. conditional random fields). The existing datasets with a few thousand training instances seem sufficient.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 98, |
|
"text": "(Grennan et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Second, citation parsers should, if possible, be (re)trained on data that is similar to the data that should actually be parsed. Such a re-training increased performance by around 30% in our experiments. This finding may also explain why researchers often report excellent performance of their tools and approaches with e.g. F1's of over 0.9. These researchers typically evaluate their models on data highly similar to the training data. This might be considered a realistic scenario for those cases when re-training is possible. However, re- porting such results creates unrealistic expectations for scenarios without the option to re-train, i.e. for users who just want to use a citation parser like Grobid out-of-the-box. Therefore, we propose that future evaluations of citation parsing algorithms should be conducted on at least two datasets: One dataset that is similar to the training dataset, and one out-of-sample dataset that differs from the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Third, citation parsers should be trained with as many labelled field types as possible, even if these fields will not be in the data that should be parsed. Such a fine-grained training improved F1 by 13.5% in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Fourth, having ten times as much training data (10,000 vs. 1,000) improved the parsing performance by 6%, without notable improvements beyond 10,000 instances. Annotating a few thousand instances should be feasible for many scenarios. Hence, businesses and organizations who want the maximum accuracy should annotate their own data for training as this likely will lead to large increases in accuracy (+30%, see conclusion 3).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Fifth, given how similar synthetic and traditionally annotated data perform, synthetic data likely is suitable to train deep neural networks for citation parsing. This, of course, has yet to be empirically to be shown. However, if our assumption holds true, deep citation parsers could greatly benefit from synthetic data like GIANT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For the future, we see the need to extend our experiments to different machine learning algorithms and datasets (e.g. unarXive (Saier and F\u00e4rber, 2020) or CORE (Knoth and Zdrahal, 2012) ). It would also be interesting to analyze if and to what extend synthetic data could improve related disciplines. This may include citation-string matching, i.e. analyzing whether two different reference strings refer to the same document (Ghavimi et al., 2019) , or the extraction of mathematical formulae (Greiner-Petter et al., 2020) or titles (Lipinski et al., 2013) from scientific articles.", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 151, |
|
"text": "(Saier and F\u00e4rber, 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 185, |
|
"text": "(Knoth and Zdrahal, 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 448, |
|
"text": "(Ghavimi et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 523, |
|
"text": "(Greiner-Petter et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 557, |
|
"text": "(Lipinski et al., 2013)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The work presented in this manuscript is based on Mark Grennan's Master thesis \"1 Billion Citation Dataset and Deep Learning Citation Extraction\" at Trinity College Dublin, Ireland, 2018/2019", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the terms 'citation parsing', 'reference parsing', and 'reference-string parsing' interchangeably.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This is a shortcoming of GIANT. However, the purpose of our current work is to generally compare 'real' vs synthetic data. Hence, both datasets should be as similar as possible in terms of available fields to make a fair comparison. Therefore, we removed all fields that were not present in both datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The words were: bone, recommender systems, running, war, crop, monetary, migration, imprisonment, hubble, obstetrics, photonics, carbon, cellulose, evolutionary, revolutionary, paleobiology, penal, leadership, soil, musicology.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "All results are based on the Macro Average F1. Looking at the Micro Average F1 shows a slightly better performance for TrainGrobid than for TrainGIANT (0.82 vs. 0.80), but the difference is neither large nor statistically significant (p\u00a10.05).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We are grateful for the support received by Martin Schibel, Andrew Collins and Dominika Tkaczyk in creating the GIANT dataset. We would also like to acknowledge that this research was partly conducted with the financial support of the ADAPT SFI Research Centre at Trinity College Dublin. The ADAPT SFI Centre for Digital Media Technology is funded by Science Foundation Ireland through the SFI Research Centres Programme and is cofunded under the European Regional Development Fund (ERDF) through Grant 13/RC/2106.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Citation metadata extraction via deep neural network-based segment sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "An", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liangcai", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhuoren", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Runtao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 ACM on Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1967--1970", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dong An, Liangcai Gao, Zhuoren Jiang, Runtao Liu, and Zhi Tang. 2017. Citation metadata extraction via deep neural network-based segment sequence la- beling. In Proceedings of the 2017 ACM on Confer- ence on Information and Knowledge Management, pages 1967-1970. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A new dataset for fine-grained citation field extraction", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Anzaroot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ICML Workshop on Peer Reviewing and Publishing Models", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sam Anzaroot and Andrew McCallum. 2013. A new dataset for fine-grained citation field extraction. ICML Workshop on Peer Reviewing and Publishing Models.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Three options for citation tracking: Google Scholar, Scopus and Web of Science", |
|
"authors": [ |
|
{ |
|
"first": "Nisa", |
|
"middle": [], |
|
"last": "Bakkalbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janis", |
|
"middle": [], |
|
"last": "Glover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Biomedical Digital Libraries", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1186/1742-5581-3-7" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nisa Bakkalbasi, Kathleen Bauer, Janis Glover, and Lei Wang. 2006. Three options for citation track- ing: Google Scholar, Scopus and Web of Science. Biomedical Digital Libraries, 3.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Google Scholar's Ranking Algorithm: An Introductory Overview", |
|
"authors": [ |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bela", |
|
"middle": [], |
|
"last": "Gipp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 12th International Conference on Scientometrics and Informetrics (ISSI'09)", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "230--241", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joeran Beel and Bela Gipp. 2009a. Google Scholar's Ranking Algorithm: An Introductory Overview. In Proceedings of the 12th International Conference on Scientometrics and Informetrics (ISSI'09), vol- ume 1, pages 230-241, Rio de Janeiro (Brazil). Inter- national Society for Scientometrics and Informetrics. Available at http://docear.org.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Google Scholar's Ranking Algorithm: The Impact of Citation Counts (An Empirical Study)", |
|
"authors": [ |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bela", |
|
"middle": [], |
|
"last": "Gipp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 3rd IEEE International Conference on Research Challenges in Information Science (RCIS'09)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "439--446", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/RCIS.2009.5089308" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joeran Beel and Bela Gipp. 2009b. Google Scholar's Ranking Algorithm: The Impact of Citation Counts (An Empirical Study). In Proceedings of the 3rd IEEE International Conference on Research Chal- lenges in Information Science (RCIS'09), pages 439-446, Fez (Morocco). IEEE. Available at http://docear.org.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Research paper recommender systems: A literature survey", |
|
"authors": [ |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bela", |
|
"middle": [], |
|
"last": "Gipp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Langer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Corinna", |
|
"middle": [], |
|
"last": "Breitinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Journal on Digital Libraries", |
|
"volume": "", |
|
"issue": "4", |
|
"pages": "305--338", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s00799-015-0156-0" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joeran Beel, Bela Gipp, Stefan Langer, and Corinna Breitinger. 2016. Research paper recommender sys- tems: A literature survey. International Journal on Digital Libraries, (4):305-338.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Deepbibx: Deep learning for image based bibliographic data extraction", |
|
"authors": [ |
|
{ |
|
"first": "Akansha", |
|
"middle": [], |
|
"last": "Bhardwaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dominik", |
|
"middle": [], |
|
"last": "Mercier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Neural Information Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "286--293", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akansha Bhardwaj, Dominik Mercier, Andreas Dengel, and Sheraz Ahmed. 2017. Deepbibx: Deep learning for image based bibliographic data extraction. In In- ternational Conference on Neural Information Pro- cessing, pages 286-293. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bibliometric-enhanced information retrieval (bir) 10th anniversary workshop edition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Cabanac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingo", |
|
"middle": [], |
|
"last": "Frommholz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Mayr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.10336" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Cabanac, Ingo Frommholz, and Philipp Mayr. 2020. Bibliometric-enhanced information retrieval (bir) 10th anniversary workshop edition. arXiv preprint arXiv:2001.10336.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "ParsCit: An open-source CRF reference string parsing package", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Councill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Giles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Kan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of LREC", |
|
"volume": "2008", |
|
"issue": "", |
|
"pages": "661--667", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I.G. Councill, C.L. Giles, and M.Y. Kan. 2008. ParsCit: An open-source CRF reference string pars- ing package. In Proceedings of LREC, volume 2008, pages 661-667. European Language Resources As- sociation (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Extended co-citation search: Graphbased document retrieval on a co-citation network containing citation context information", |
|
"authors": [ |
|
{ |
|
"first": "Masaki", |
|
"middle": [], |
|
"last": "Eto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Information Processing & Management", |
|
"volume": "56", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masaki Eto. 2019. Extended co-citation search: Graph- based document retrieval on a co-citation network containing citation context information. Informa- tion Processing & Management, 56(6):102046.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Citation recommendation: Approaches and datasets", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "F\u00e4rber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Jatowt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.06961" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael F\u00e4rber and Adam Jatowt. 2020. Citation recommendation: Approaches and datasets. arXiv preprint arXiv:2002.06961.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Citewerts: A system combining citeworthiness with citation recommendation", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "F\u00e4rber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Thiemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Jatowt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "European Conference on Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "815--819", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael F\u00e4rber, Alexander Thiemann, and Adam Ja- towt. 2018. Citewerts: A system combining cite- worthiness with citation recommendation. In Eu- ropean Conference on Information Retrieval, pages 815-819. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The citeproc-js Citation Processor", |
|
"authors": [ |
|
{ |
|
"first": "Frank", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Bennett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jr. Frank G. Bennett. 2011. The citeproc-js Citation Processor.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "An evaluation of the effect of reference strings and segmentation on citation matching", |
|
"authors": [ |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Behnam Ghavimi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Otto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mayr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Theory and Practice of Digital Libraries", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "365--369", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Behnam Ghavimi, Wolfgang Otto, and Philipp Mayr. 2019. An evaluation of the effect of reference strings and segmentation on citation matching. In Interna- tional Conference on Theory and Practice of Digital Libraries, pages 365-369. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Discovering mathematical objects of interest-a study of mathematical notations. arXiv preprint", |
|
"authors": [ |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Greiner-Petter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Moritz", |
|
"middle": [], |
|
"last": "Schubotz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabian", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Corinna", |
|
"middle": [], |
|
"last": "Breitinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akiko", |
|
"middle": [], |
|
"last": "Cohl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bela", |
|
"middle": [], |
|
"last": "Aizawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gipp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.02712" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andr\u00e9 Greiner-Petter, Moritz Schubotz, Fabian M\u00fcller, Corinna Breitinger, Howard S Cohl, Akiko Aizawa, and Bela Gipp. 2020. Discovering mathematical ob- jects of interest-a study of mathematical notations. arXiv preprint arXiv:2002.02712.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Giant: The 1-billion annotated synthetic bibliographic-reference-string dataset for deep citation parsing", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Grennan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schibel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "27th AIAI Irish Conference on Artificial Intelligence and Cognitive Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "101--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Grennan, Martin Schibel, Andrew Collins, and Joeran Beel. 2019. Giant: The 1-billion annotated synthetic bibliographic-reference-string dataset for deep citation parsing. In 27th AIAI Irish Confer- ence on Artificial Intelligence and Cognitive Science, pages 101-112.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Testing the calculation of a realistic h-index in Google Scholar, Scopus, and Web of Science for FW Lancaster", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Jacso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Library Trends", |
|
"volume": "56", |
|
"issue": "4", |
|
"pages": "784--815", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Jacso. 2008. Testing the calculation of a realistic h-index in Google Scholar, Scopus, and Web of Sci- ence for FW Lancaster. Library Trends, 56(4):784- 815.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Graph embedding for citation recommendation", |
|
"authors": [ |
|
{ |
|
"first": "Haofeng", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Saule", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1812.03835" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haofeng Jia and Erik Saule. 2018. Graph embed- ding for citation recommendation. arXiv preprint arXiv:1812.03835.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Core: three access levels to underpin open access. D-Lib Magazine", |
|
"authors": [ |
|
{ |
|
"first": "Petr", |
|
"middle": [], |
|
"last": "Knoth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zdenek", |
|
"middle": [], |
|
"last": "Zdrahal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "18", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petr Knoth and Zdenek Zdrahal. 2012. Core: three ac- cess levels to underpin open access. D-Lib Maga- zine, 18(11/12).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Evaluation of header metadata extraction approaches and tools for scientific pdf documents", |
|
"authors": [ |
|
{ |
|
"first": "Mario", |
|
"middle": [], |
|
"last": "Lipinski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Corinna", |
|
"middle": [], |
|
"last": "Breitinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bela", |
|
"middle": [], |
|
"last": "Gipp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 13th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mario Lipinski, Kevin Yao, Corinna Breitinger, Joeran Beel, and Bela Gipp. 2013. Evaluation of header metadata extraction approaches and tools for scien- tific pdf documents. In Proceedings of the 13th", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "ACM/IEEE-CS joint conference on Digital libraries (JCDL'13)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "385--386", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "ACM/IEEE-CS joint conference on Digital libraries (JCDL'13), pages 385-386.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Citesight: supporting contextual citation recommendation using differential search", |
|
"authors": [ |
|
{ |
|
"first": "Avishay", |
|
"middle": [], |
|
"last": "Livne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Gokuladas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Teevan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Susan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eytan", |
|
"middle": [], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Adar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "807--816", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avishay Livne, Vivek Gokuladas, Jaime Teevan, Su- san T Dumais, and Eytan Adar. 2014. Citesight: supporting contextual citation recommendation us- ing differential search. pages 807-816.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Grobid: Combining automatic bibliographic data recognition and term extraction for scholarship publications", |
|
"authors": [ |
|
{ |
|
"first": "Patrice", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "International conference on theory and practice of digital libraries", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "473--474", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrice Lopez. 2009. Grobid: Combining automatic bibliographic data recognition and term extraction for scholarship publications. In International con- ference on theory and practice of digital libraries, pages 473-474. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Grobid, github repository", |
|
"authors": [ |
|
{ |
|
"first": "Patrice", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrice Lopez. 2013. Grobid, github repository.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Training data query #535", |
|
"authors": [ |
|
{ |
|
"first": "Patrice", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrice Lopez. 2020. Train- ing data query #535. GitHub https://github.com/kermitt2/grobid/issues/535.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Information extraction from scientific articles: a survey", |
|
"authors": [ |
|
{ |
|
"first": "Zara", |
|
"middle": [], |
|
"last": "Nasar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhammad Kamran", |
|
"middle": [], |
|
"last": "Syed Waqar Jaffry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Malik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Scientometrics", |
|
"volume": "117", |
|
"issue": "3", |
|
"pages": "1931--1990", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zara Nasar, Syed Waqar Jaffry, and Muhammad Kam- ran Malik. 2018. Information extraction from scien- tific articles: a survey. Scientometrics, 117(3):1931- 1990.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Neural parscit: a deep learning-based reference string parser", |
|
"authors": [ |
|
{ |
|
"first": "Animesh", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manpreet", |
|
"middle": [], |
|
"last": "Kaur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Journal on Digital Libraries", |
|
"volume": "19", |
|
"issue": "4", |
|
"pages": "323--337", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s00799-018-0242-1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Animesh Prasad, Manpreet Kaur, and Min-Yen Kan. 2018. Neural parscit: a deep learning-based refer- ence string parser. International Journal on Digital Libraries, 19(4):323-337.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Deepbird: An automatic bibliographic reference detection approach", |
|
"authors": [], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1912.07266" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Syed Tahseen Raza Rizvi, Andreas Dengel, and Sheraz Ahmed. 2019. Deepbird: An automatic bib- liographic reference detection approach. arXiv preprint arXiv:1912.07266.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Deep reference mining from scholarly literature in the arts and humanities. Frontiers in", |
|
"authors": [ |
|
{ |
|
"first": "Danny", |
|
"middle": [], |
|
"last": "Rodrigues Alves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [], |
|
"last": "Colavizza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Research Metrics and Analytics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danny Rodrigues Alves, Giovanni Colavizza, and Fr\u00e9d\u00e9ric Kaplan. 2018. Deep reference mining from scholarly literature in the arts and humanities. Fron- tiers in Research Metrics and Analytics, 3:21.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "2020. unarxive: a large scholarly data set with publications' full-text, annotated in-text citations, and links to metadata", |
|
"authors": [ |
|
{ |
|
"first": "Tarek", |
|
"middle": [], |
|
"last": "Saier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "F\u00e4rber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Scientometrics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tarek Saier and Michael F\u00e4rber. 2020. unarxive: a large scholarly data set with publications' full-text, annotated in-text citations, and links to metadata. Scientometrics, pages 1-24.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Machine learning vs. rules and out-of-the-box vs. retrained: An evaluation of open-source bibliographic reference and citation parsers", |
|
"authors": [ |
|
{ |
|
"first": "Dominika", |
|
"middle": [], |
|
"last": "Tkaczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paraic", |
|
"middle": [], |
|
"last": "Sheridan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 18th ACM/IEEE on Joint Conference on Digital Libraries, JCDL '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "99--108", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3197026.3197048" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dominika Tkaczyk, Andrew Collins, Paraic Sheridan, and Joeran Beel. 2018a. Machine learning vs. rules and out-of-the-box vs. retrained: An evalua- tion of open-source bibliographic reference and cita- tion parsers. In Proceedings of the 18th ACM/IEEE on Joint Conference on Digital Libraries, JCDL '18, pages 99-108, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Parsrec: A novel meta-learning approach to recommending bibliographic reference parsers", |
|
"authors": [ |
|
{ |
|
"first": "Dominika", |
|
"middle": [], |
|
"last": "Tkaczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rohit", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Riccardo", |
|
"middle": [], |
|
"last": "Cinti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 26th Irish Conference on Artificial Intelligence and Cognitive Science (AICS)", |
|
"volume": "2259", |
|
"issue": "", |
|
"pages": "162--173", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dominika Tkaczyk, Rohit Gupta, Riccardo Cinti, and Joeran Beel. 2018b. Parsrec: A novel meta-learning approach to recommending bibliographic reference parsers. In Proceedings of the 26th Irish Confer- ence on Artificial Intelligence and Cognitive Science (AICS), volume 2259, pages 162-173. CEUR-WS.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Parsrec: A meta-learning recommender system for bibliographic reference parsing tools", |
|
"authors": [ |
|
{ |
|
"first": "Dominika", |
|
"middle": [], |
|
"last": "Tkaczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paraic", |
|
"middle": [], |
|
"last": "Sheridan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joeran", |
|
"middle": [], |
|
"last": "Beel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 12th ACM Conference on Recommender Systems (RecSys)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dominika Tkaczyk, Paraic Sheridan, and Joeran Beel. 2018c. Parsrec: A meta-learning recommender sys- tem for bibliographic reference parsing tools. In Proceedings of the 12th ACM Conference on Recom- mender Systems (RecSys).", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Cermine: automatic extraction of structured metadata from scientific literature", |
|
"authors": [ |
|
{ |
|
"first": "Dominika", |
|
"middle": [], |
|
"last": "Tkaczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawe\u0142", |
|
"middle": [], |
|
"last": "Szostek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mateusz", |
|
"middle": [], |
|
"last": "Fedoryszak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [ |
|
"Jan" |
|
], |
|
"last": "Dendek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Bolikowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Journal on Document Analysis and Recognition", |
|
"volume": "18", |
|
"issue": "4", |
|
"pages": "317--335", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dominika Tkaczyk, Pawe\u0142 Szostek, Mateusz Fedo- ryszak, Piotr Jan Dendek, and \u0141ukasz Bolikowski. 2015. Cermine: automatic extraction of structured metadata from scientific literature. International Journal on Document Analysis and Recognition (IJ- DAR), 18(4):317-335.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Towards highly accurate publication information extraction from academic homepages", |
|
"authors": [ |
|
{ |
|
"first": "Yiqing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiqing Zhang. 2018. Towards highly accurate publi- cation information extraction from academic home- pages.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "List of Citation DatasetsFigure 3: F1 of the two models (Train Grobid and Train GIANT ) on the four evaluation datasets.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "F1 for different fields (title, author, ...), evaluation dataset and training data.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Performance (F1) of Train GIANT on the four evaluation datasets, by the number of training instances.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"text": "The approach and extracted fields of six popular open-source citation parsing tools", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"text": ". When evaluated on Eval Grobid , Train Grobid outperforms Train GIANT by 35% with an F1 of 0.93 vs. 0.69. When evaluated on Eval GIANT , results are almost exactly the opposite: This time, Train GIANT outperforms Train Grobid by 32% with an F1 of 0.91 vs. 0.69. On Eval Cora , the difference is less strong but still notable. Train Grobid outperforms Train GIANT by 19% with an F1 of 0.74 vs. 0.62. This is not surprising as Grobid's training data includes some Cora data.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |