|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:12:48.568820Z" |
|
}, |
|
"title": "Dating Ancient texts: an Approach for Noisy French Documents", |
|
"authors": [ |
|
{ |
|
"first": "Ana\u00eblle", |
|
"middle": [], |
|
"last": "Baledent", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sorbonne University", |
|
"location": { |
|
"postCode": "STIH -EA 4509", |
|
"settlement": "Paris", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Hiebel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sorbonne University", |
|
"location": { |
|
"postCode": "STIH -EA 4509", |
|
"settlement": "Paris", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ga\u00ebl", |
|
"middle": [], |
|
"last": "Lejeune", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sorbonne University", |
|
"location": { |
|
"postCode": "STIH -EA 4509", |
|
"settlement": "Paris", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Automatic dating of ancient documents is a very important area of research for digital humanities applications. Many documents available via digital libraries do not have any dating or dating that is uncertain. Document dating is not only useful by itself but it also helps to choose the appropriate NLP tools (lemmatizer, POS tagger. . .) for subsequent analysis. This paper provides a dataset with thousands of ancient documents in French and present methods and evaluation metrics for this task. We compare character-level methods with token-level methods on two different datasets of two different time periods and two different text genres. Our results show that character-level models are more robust to noise than classical token-level models. The experiments presented in this article focused on documents written in French but we believe that the ability of character-level models to handle noise properly would help to achieve comparable results on other languages and more ancient languages in particular.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Automatic dating of ancient documents is a very important area of research for digital humanities applications. Many documents available via digital libraries do not have any dating or dating that is uncertain. Document dating is not only useful by itself but it also helps to choose the appropriate NLP tools (lemmatizer, POS tagger. . .) for subsequent analysis. This paper provides a dataset with thousands of ancient documents in French and present methods and evaluation metrics for this task. We compare character-level methods with token-level methods on two different datasets of two different time periods and two different text genres. Our results show that character-level models are more robust to noise than classical token-level models. The experiments presented in this article focused on documents written in French but we believe that the ability of character-level models to handle noise properly would help to achieve comparable results on other languages and more ancient languages in particular.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Nowadays, a large number of historical documents is accessible through digital libraries among which we can cite EUROPEANA 1 or GALLICA 2 among other Digital Humanities (DH) digitization projects. This allows libraries to spread cultural heritage to a large and various audience (academics, historians, sociologists among others). It is also a great opportunity to have such an amount of data usable in various projects including NLP projects. However, exploiting these documents automatically can be difficult because of the their various quality, their imperfect digitization, the lack of metadata or the fact that they exhibit a great variety of languages (among which under-resourced languages). Many documents will be difficult to access for researchers since it is difficult to unite them in a corpus, to rely on consistent metadata or to use NLP tools if the data is too noisy. In particular, it is difficult for DH researchers to use most of available data since the quality of the Optical Character Recognition (OCR) on ancient documents can make them impossible to process properly with classical NLP tools. Therefore, pre-processing and data cleaning is often mandatory to make them suitable for classical NLP pipelines. This need increases the cost of treating new corpora for DH researchers since choosing the appropriate NLP tools can even be difficult. The problems encountered can vary with respect to the languages used in the document or the period were the document has been printed but it remains an open problem. Therefore, the knowledge of the date of the document is not only useful by itself but also because it helps to choose the appropriate OCR configuration (Cecotti and Bela\u00efd, 2005) , the post-processing techniques after the OCR phase (Afli et al., 2016) or the appropriate NLP processing tools to use for a particular corpus (Sagot, 2019) . Hence, we propose in this paper to investigate the problem of document dating in noisy documents. The contribution of this paper is three fold : (I) we pro-pose a corpus of around 8,000 ancient documents in French (published from 1600 to 1710), (II) we propose some methods to enrich the metadata and (III) we propose new ideas to evaluate the quality of digitized data in order to put the DH researcher in the center of the loop. In the experiments part we will focus on the document dating task but we believe that the corpus we developed and the rationale of our methods can be useful for other tasks. In Section 2. we present related work on corpus construction and document dating. In Section 3. we present the corpus made available with the article and in section 4. we show some results on document dating on this corpus and compare our method with other state-of-the-art datasets. Finally in Section 5. we give some words of conclusion and present future orientations of this work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1686, |
|
"end": 1712, |
|
"text": "(Cecotti and Bela\u00efd, 2005)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1766, |
|
"end": 1785, |
|
"text": "(Afli et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1857, |
|
"end": 1870, |
|
"text": "(Sagot, 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In this work we try to tackle the problem of document dating in the context of historical textual documents. One way to tackle this task is to define it as a classification task, each year (or another time granularity) being a class. (Niculae et al., 2014) proposed a text ranking approach for solving document dating. Temporal language models for document dating use mainly a token-level representation. (Popescu and Strapparava, 2013) develop the hypothesis that period changes come with topics changes and written information reflect these changes by used vocabulary. So, one can delimit epochs by observing the variation in word frequencies or word contexts like in recent works about semantic change (Hamilton et al., 2016) . In the same fashion, (de Jong et al., 2005) and (Kanhabua and N\u00f8rv\u00e5g, 2008) used probabilistic models: the authors assign each word a probability to appear in a time period. Semantic change is therefore leveraged to give a time stamp to a given document. Some authors proposed graph models to extract relationship between events related in the document in order to find the document focus time (Jatowt et al., 2013) or compute an appropriate time stamp for the document (Mishra and Berberich, 2016) . Another interesting approach comes from (Stajner and Zampieri, 2013) who used four stylistic features to find appropriate document dating: average sentence length, average word length, lexical density and lexical richness. Several works on the subject of document dating involved preprocessing of texts (e.g. tokenization, morphosynctatic tagging or named-entity recognition) or external resources, like Wikipedia or Google Ngram in order to detect explicit features that can characterize the date of a document : named entities, neologisms or to the contrary archaic words ((Garcia-Fernandez et al., 2011) ; (Salaberri et al., 2015) ) However, this implies to have access a clean plain text, or a text without too much OCR errors in order to apply data cleaning techniques. Indeed the majority of works exploits newspapers' articles, due to facility for collect them on web and a high precision for dating, and few works use digitized documents. In Section 3. we show how corpus construction can be an issue for these token-level models and why the corpus we wanted to process can be too noisy for them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 256, |
|
"text": "(Niculae et al., 2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 436, |
|
"text": "(Popescu and Strapparava, 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 728, |
|
"text": "(Hamilton et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 752, |
|
"end": 774, |
|
"text": "(de Jong et al., 2005)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 779, |
|
"end": 806, |
|
"text": "(Kanhabua and N\u00f8rv\u00e5g, 2008)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1125, |
|
"end": 1146, |
|
"text": "(Jatowt et al., 2013)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1201, |
|
"end": 1229, |
|
"text": "(Mishra and Berberich, 2016)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1806, |
|
"end": 1838, |
|
"text": "((Garcia-Fernandez et al., 2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1841, |
|
"end": 1865, |
|
"text": "(Salaberri et al., 2015)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Textual Document Dating", |
|
"sec_num": "2." |
|
}, |
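The probabilistic temporal language models mentioned above (assigning each word a probability of appearing in a given time period, then scoring a document against each period) can be illustrated with a minimal sketch. The snippet below is our own toy illustration, not the cited authors' implementation; the additive smoothing constant, the <unk> handling and the toy corpus are assumptions made for the example.

```python
from collections import Counter
import math

def train_temporal_lm(docs_by_decade, alpha=0.5):
    """Estimate P(word | decade) with additive smoothing.
    docs_by_decade maps a decade label (e.g. 1650) to a list of tokenized documents."""
    counts = {decade: Counter(tok for doc in docs for tok in doc)
              for decade, docs in docs_by_decade.items()}
    vocab = {tok for c in counts.values() for tok in c}
    models = {}
    for decade, c in counts.items():
        total = sum(c.values()) + alpha * len(vocab)
        models[decade] = {w: (c[w] + alpha) / total for w in vocab}
        models[decade]["<unk>"] = alpha / total  # probability mass for unseen words
    return models

def date_document(tokens, models):
    """Return the decade whose language model gives the document the highest log-likelihood."""
    def loglik(model):
        return sum(math.log(model.get(tok, model["<unk>"])) for tok in tokens)
    return max(models, key=lambda decade: loglik(models[decade]))

# Toy usage: two decades, one short document each.
models = train_temporal_lm({1650: [["la", "gazette", "du", "roy"]],
                            1660: [["lettre", "de", "la", "cour"]]})
print(date_document(["gazette", "du", "roy"], models))  # -> 1650
```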
|
{ |
|
"text": "Corpus construction is a crucial aspect in Computational Linguistics (CL) and Digital Humanities (DH) fields: the corpus construction is one of the first steps in research. To obtain relevant results, the used corpora must meet specific criteria: genre, medium, topic among other criteria (see (Sinclair, 1996) or (Biber, 1993) for other criteria examples). It must also be adapted with research objectives: a classification task doesn't require same data that a literary analysis. Another question regarding corpus construction is the following: what NLP tools can be used for processing the corpus ? With Internet one can easily access to a huge amount of texts and corpora. Despite this, researchers must be careful with the data sources : quality, authenticity, noisiness. Barbaresi (Barbaresi, 2015) mentions inherent problems with a web scrapper method to collect corpus: repeated and/or generated text, wrong machine-translated text, spam, multilanguage documents or empty documents. Documents exhibiting this kind of problems can impair the efficiency of classifiers or other NLP modules and force researchers to rebuild a new corpus or to clean the data manually. Digital libraries provide many and various textual archives, easy to collect and often used in Digital Humanities in view of topics. Indeed, these corpora are also diversified that domains in Humanities and Social Sciences (HSS): 19 th century newspapers, middle-age manuscripts or early modern prints, (Abiven and Lejeune, 2019) . However, these documents are not \"born-digital\" and are often available only in image format. The quality of the text one can extract from these images is far from perfect. So, OCR performances are lower than one can expect on a modern document and this deterioration has an impact on the usability of the data. Several works like (Traub et al., 2015) or (Linhares Pontes et al., 2019) showed that OCR errors has an important impact on NLP tools efficiency and subsequent expert analysis. Therefore, correcting automatically OCR has become an important prior task to take more advantage of digitalized (Rigaud et al., 2019) ). Automation of this post-processing may reduce financial and temporal costs as compared to manual correction. It is a great challenge for Digital Humanities since these costs can in some cases constitute the biggest part of DH projects budget.", |
|
"cite_spans": [ |
|
{ |
|
"start": 294, |
|
"end": 310, |
|
"text": "(Sinclair, 1996)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 327, |
|
"text": "(Biber, 1993)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 804, |
|
"text": "(Barbaresi, 2015)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1476, |
|
"end": 1502, |
|
"text": "(Abiven and Lejeune, 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1836, |
|
"end": 1856, |
|
"text": "(Traub et al., 2015)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1860, |
|
"end": 1890, |
|
"text": "(Linhares Pontes et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 2107, |
|
"end": 2128, |
|
"text": "(Rigaud et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Construction", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "The corpus we mainly use for our experimentations has been collected on the French digital library GALLICA. From GALLICA it is possible to access to a large amount of digitized historical and various documents and we wanted to see how we can apply NLP techniques to old documents were the OCR makes a lot of errors. Some textual documents have also plain text access, in fact a non corrected OCR output. On the GALLICA website, advanced search's tab allows a search with different filters like date of publication, language, type of document or theme. For this experiment, we selected all Latin and French documents with plain text access and dated between 1600 and 1720. It represents about 8,000 documents. With the search API we exported a research report in CSV format and transformed it in a JSON file. Each document has an unique identifier and has metadata among which title, author(s), editor, date and other descriptions 3 . We took advantage of this research report to download all the documents in HTML. We developed a tool that scrapes the text and sorts the documents according to different kinds of metadata 4 . Four versions for each text are extracted by this tool in order to fulfill different needs : (i) plain text with dates inside the documents; (ii) plain text where dates have been removed (with regular expressions); (iii) text with HTML tags and dates; (iv) text with HTML tags and without date. For assuring that we have the appropriate date for each document, we took advantage of the date indicated in HTML metadata. Documents for which the metadata exhibited an uncertain date like 16, 16??, 16.. or a time period (1667-1669) have been discarded. Table 1 exhibits the statistics on the dataset we extracted 3 Metadata present in the resource associated with this paper 4 GITHUB repository to be made public from GALLICA. In order to perform comparisons with other approaches we also used two other corpora of ancient French documents of another period (1800-1950) which had also OCR issues: Deft 2010 challenge on document dating (Grouin et al., 2010) where the objective was to give the good decade for a given text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2059, |
|
"end": 2080, |
|
"text": "(Grouin et al., 2010)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1676, |
|
"end": 1683, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Dataset for Document Dating", |
|
"sec_num": "3.2." |
|
}, |
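As a rough illustration of the preparation steps described above (converting the CSV search report into JSON, discarding uncertain dates such as 16, 16?? or 1667-1669, and producing the version of each text where dates are removed with regular expressions), here is a minimal Python sketch. The column names, file paths and regular expressions are illustrative assumptions, not the released tool.

```python
import csv
import json
import re

# Dates that are uncertain ("16", "16??", "16..") or given as a range ("1667-1669") are discarded.
UNCERTAIN_DATE = re.compile(r"^\d{2}([?.]{2})?$|^\d{4}-\d{4}$")
# Rough pattern used to produce the "dates removed" version of each text.
YEAR_PATTERN = re.compile(r"\b1[567]\d{2}\b")

def load_report(csv_path, json_path):
    """Convert the CSV search report exported from the GALLICA API into a JSON file,
    keeping only documents whose date is unambiguous."""
    with open(csv_path, newline="", encoding="utf-8") as f:
        records = list(csv.DictReader(f))
    kept = [r for r in records if not UNCERTAIN_DATE.match(r.get("date", ""))]
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(kept, f, ensure_ascii=False, indent=2)
    return kept

def strip_dates(text):
    """Version (ii): plain text where explicit year mentions have been removed."""
    return YEAR_PATTERN.sub("", text)
```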
|
{ |
|
"text": "We propose a method that takes advantage of noisy corpus to enrich metadata. The rationale of our method is to be as much independent of pre-processing steps because the lack of language dedicated resources (few NLP tools exist for ancient languages and their efficiency can be put into question). This can help DH researchers to process more easily new datasets since models robust to noise can avoid research projects to use too much resources in data preparation. For the GALLICA corpus we split the data into a training set (70%) and a test set (30%) and maintained the imbalance between the different classes. For the DEFT2010 corpora, the data was already separated between train and test so we kept it in order to ease comparisons with previous approaches. We aim to find models suitable for noisy data so we got inspiration from recent works that showed that characterlevel models perform well for document dating (Abiven and Lejeune, 2019) . We compare character-level representation to word-level representations in order to assess their respective advantages. We present our first results in Section 4..", |
|
"cite_spans": [ |
|
{ |
|
"start": 922, |
|
"end": 948, |
|
"text": "(Abiven and Lejeune, 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training a Temporal model", |
|
"sec_num": "3.3." |
|
}, |
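A minimal sketch of the 70%/30% split described above, assuming the decade labels are available as a list; stratifying on the labels keeps the class imbalance of the GALLICA corpus identical in the training and test sets. This uses scikit-learn's standard utility and is an illustration of the setup, not the exact experimental script.

```python
from sklearn.model_selection import train_test_split

def split_corpus(texts, decades, seed=0):
    """70% train / 30% test, preserving the (imbalanced) decade distribution."""
    return train_test_split(
        texts, decades, test_size=0.30, stratify=decades, random_state=seed
    )
```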
|
{ |
|
"text": "In this Section, we first present results on the the Gallica dataset, then we use the exact same configuration to train a temporal model for the DEFT2010 challenge dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "For evaluation purposes, we use two different metrics. First, we use macro f-measure rather than micro f-measure to compare different models for document dating since the corpus we built from GALLICA is quite imbalanced. Then, since all the classification errors do not have the same impact, in other words when we have a document from 1650 it is better to predict 1640 than 1630, we wanted to have another measure. We choosed to use a Gaussian similarity (here after Similarity), as defined by Grouin et al. (Grouin et al., 2011) in order to measure how much there is a difference between the predicted decade and the real decade. It is computed as follows (with pd being the predicted decade and rd being the real decade):", |
|
"cite_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 530, |
|
"text": "Grouin et al. (Grouin et al., 2011)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "Similarity(pd, rd) = e \u2212\u03c0/10 2 (pd\u2212rd) 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1." |
|
}, |
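A direct implementation of the similarity above (the function name is ours); the short check reproduces the values excerpted in Table 2 for |pd - rd| = 0, 1, 2, 3.

```python
import math

def gaussian_similarity(pd, rd):
    """Gaussian similarity between the predicted decade pd and the real decade rd,
    both expressed as decade indices (an error of one decade gives |pd - rd| = 1)."""
    return math.exp(-(math.pi / 10 ** 2) * (pd - rd) ** 2)

# -> [1.0, 0.97, 0.88, 0.75], matching Table 2
print([round(gaussian_similarity(0, k), 2) for k in range(4)])
```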
|
{ |
|
"text": "This measure has the good property to highlight systems that produce smaller errors: an error of two decades is worst than two errors of one decade (see Table 2 for an excerpt of this similarity measure outcome). improves results until N = 4 . With N > 4 there is no improvement and at some point the results get even worse, this observation is consistent with previous experiments with this kind of features (Brixtel, 2015) . Longer N size seems to interfere with generalization. With a random forest classifier and token-level features (token n-grams with 1 <= N <= 3) we obtained at the best 0.85 in similarity if we discard tokens that include non-alphanumeric characters and 0.93 if we do not discard them. This shows that punctuation, and in general short sequences of characters, are very useful for this kind of task even if they offer worse performances than character n-grams. Another interesting result is that this token-level model achieves only a 46.3% score in macro F-measure. These features exhibit more errors, resulting in a worse F-measure, but the errors are closer to the target. Figure 1 exhibits the confusion matrix on the GALLICA dataset with our best classifier. One can see that most classification errors are low range errors, this is consistent with the high similarity score the classifier achieves. As presented before, this model outperforms the best token-level model ( Figure 2 ) in F-measure but the difference in similarity is less significant. When comparing the first line of the two confusion matrices one can see that the number of true positives (first cell of the line) is logically higher with the character-level model. However, the false negatives (rest of the line) are in fact very close to the target class, the tokenlevel model shows a bit less errors of 3 decades and more. Figure 1 : Character-level model (n-grams with 1 <= n <= 4): confusion matrix for the best classifier (Random Forest with 10 trees) on the GALLICA corpus, F-measure= 71.43, Similarity =0.950 Figure 2 : Token-level model (n-grams with 1 <= n <= 2): confusion matrix for the best classifier (Random Forest with 10 trees) on the GALLICA corpus, F-measure= 46.27, Similarity =0.928", |
|
"cite_spans": [ |
|
{ |
|
"start": 409, |
|
"end": 424, |
|
"text": "(Brixtel, 2015)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 160, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1102, |
|
"end": 1110, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1404, |
|
"end": 1412, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1825, |
|
"end": 1833, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2016, |
|
"end": 2024, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1." |
|
}, |
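A minimal scikit-learn sketch of the two configurations compared above (character 1- to 4-grams versus token 1- to 2-grams, each feeding a 10-tree Random Forest). The vectorizer defaults and the random seed are assumptions; the exact settings of our experiments may differ.

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import f1_score
from sklearn.pipeline import make_pipeline

# Character-level model: n-grams with 1 <= n <= 4, Random Forest with 10 trees.
char_model = make_pipeline(
    CountVectorizer(analyzer="char", ngram_range=(1, 4)),
    RandomForestClassifier(n_estimators=10, random_state=0),
)

# Token-level counterpart: token n-grams with 1 <= n <= 2.
token_model = make_pipeline(
    CountVectorizer(analyzer="word", ngram_range=(1, 2)),
    RandomForestClassifier(n_estimators=10, random_state=0),
)

def evaluate(model, train_texts, train_decades, test_texts, test_decades):
    """Fit the model and return the macro F-measure on the test set."""
    model.fit(train_texts, train_decades)
    predictions = model.predict(test_texts)
    return f1_score(test_decades, predictions, average="macro")
```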
|
{ |
|
"text": "In Figure 3 we present the results obtained with the same classifier trained and tested on the DEFT2010 dataset. With an F-measure of 32.8 its results are comparable to the best performer (F=33.8) for that challenge which is promising since we did not perform any kind of feature engineering dedicated to this dataset, we just used the same kind of features and the same classifier parameters. We can see Figure 3 : Character-level model (n-grams with 1 <= n <= 4): confusion matrix for a Random Forest classifier with 10 trees trained and tested on the DEFT2010 dataset, F-measure= 32.81, Similarity =0.872 that most classification errors occur on the previous or next decade. Two interesting things occur however, the 1870 is the most prone to False Positives. It is interesting since this class represent the middle of the period. The 1940 decade does not contain any True Positive. This can be linked to a historical reason since most of the newspapers of this period were not authorized so that there is no clear tendency regarding the printing methods used during this period, illustrating a limit of the character-based models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 413, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on the DEFT2010 dataset", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "In this paper we proposed a dataset suited for ancient documents dating. This dataset contains more than 8k documents in French written between 1600 to 1710. The documents in this dataset exhibit a poor quality due to a bad and not post-corrected OCR. Our results show that this should not be a problem for document dating since noise in texts does not seen to impair document dating results. To the contrary, OCR errors seem to be good features to detect the printing time of the original document. We showed that a character-level model can take advantage of noise to improve classification results as compared to a classical tokenlevel model. On a comparable dataset (DEFT2010) from a different time period (1800 to 1940) we show that the exact same features and classifier configuration achieved results close to the state-of-the-art. We believe this is an important result since post-correction of texts can be a very costly operation. This result shows that one can perform NLP task without requiring perfect datasets as input. In the future it would be interesting to see in a larger scope what is the impact of bad digitization on subsequent Natural Language Processing tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Perspectives", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "https://www.europeana.eu/ 2 https://gallica.bnf.fr/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Automatic analysis of old documents: taking advantage of an incomplete, heterogeneous and noisy corpus. Recherche d'information, document et web s\u00e9mantique", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Abiven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Lejeune", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abiven, K. and Lejeune, G. (2019). Automatic analysis of old documents: taking advantage of an incomplete, het- erogeneous and noisy corpus. Recherche d'information, document et web s\u00e9mantique, 2(Num\u00e9ro 1).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Using SMT for OCR error correction of historical texts", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Afli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Sheridan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "962--966", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Afli, H., Qiu, Z., Way, A., and Sheridan, P. (2016). Us- ing SMT for OCR error correction of historical texts. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 962-966, Portoro\u017e, Slovenia, May. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bootstrapped OCR error detection for a less-resourced language variant", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Barbaresi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Theses", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "June", |
|
"middle": [], |
|
"last": "Lyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Barbaresi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "13th Conference on Natural Language Processing (KONVENS 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbaresi, A. (2015). Ad hoc and general-purpose corpus construction from web sources. Theses, ENS Lyon, June. Barbaresi, A. (2016). Bootstrapped OCR error detection for a less-resourced language variant. In Stefanie Dip- per, et al., editors, 13th Conference on Natural Language Processing (KONVENS 2016), pages 21-26, Bochum, Germany, September.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Representativeness in corpus design", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Biber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Literary and Linguistic Computing", |
|
"volume": "8", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biber, D. (1993). Representativeness in corpus design. Literary and Linguistic Computing, 8(4):243-257, 01.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Maximal repeats enhance substringbased authorship attribution", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Brixtel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hissar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "September", |
|
"middle": [], |
|
"last": "Bulgaria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ltd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brixtel, R. (2015). Maximal repeats enhance substring- based authorship attribution. In Proceedings of the International Conference Recent Advances in Natural Language Processing, pages 63-71, Hissar, Bulgaria, September. INCOMA Ltd. Shoumen, BULGARIA.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Hybrid OCR combination approach complemented by a specialized ICR applied on ancient documents", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Cecotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Bela\u00efd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Seoul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "August", |
|
"middle": [], |
|
"last": "Korea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Jong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rode", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hiemstra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Humanities, computers and cultural heritage: Proceedings of the XVIth International Conference of the Association for History and Computing (AHC 2005)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--168", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cecotti, H. and Bela\u00efd, A. (2005). Hybrid OCR combina- tion approach complemented by a specialized ICR ap- plied on ancient documents. In 8th International Con- ference in Document Analysis and Recognition -IC- DAR'05, pages 1045-1049, Seoul, Korea, August. de Jong, F., Rode, H., and Hiemstra, D. (2005). Tem- poral language models for the disclosure of historical text. In Humanities, computers and cultural heritage: Proceedings of the XVIth International Conference of the Association for History and Computing (AHC 2005), pages 161-168. Koninklijke Nederlandse Academie van Wetenschappen, 9.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "M\u00e9thodes pour l'arch\u00e9ologie linguistique : datation par combinaison d'indices temporels", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Garcia-Fernandez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A.-L", |
|
"middle": [], |
|
"last": "Ligozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Dinarelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "D\u00c9fi Fouille de Textes", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Garcia-Fernandez, A., Ligozat, A.-L., Dinarelli, M., and Bernhard, D. (2011). M\u00e9thodes pour l'arch\u00e9ologie lin- guistique : datation par combinaison d'indices tem- porels. In D\u00c9fi Fouille de Textes, pages -, Montpellier, France, July.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Pr\u00e9sentation et r\u00e9sultats du d\u00e9fi fouille de texte DEFT2010 o\u00f9 et quand un article de presse a-t-il\u00e9t\u00e9\u00e9crit ?", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Grouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Forest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Da Sylva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Paroubek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Actes de DEFT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grouin, C., Forest, D., Da Sylva, L., Paroubek, P., and Zweigenbaum, P. (2010). Pr\u00e9sentation et r\u00e9sultats du d\u00e9fi fouille de texte DEFT2010 o\u00f9 et quand un article de presse a-t-il\u00e9t\u00e9\u00e9crit ? In Actes de DEFT, Montr\u00e9al, QC, 23 juillet. TALN.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Pr\u00e9sentation et r\u00e9sultats du d\u00e9fi fouille de texte DEFT2011. quand un article de presse a-t-il\u00e9t\u00e9\u00e9crit ?\u00e0 quel article scientifique correspond ce r\u00e9sum\u00e9 ?", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Grouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Forest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Paroubek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Actes de DEFT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grouin, C., Forest, D., Paroubek, P., and Zweigenbaum, P. (2011). Pr\u00e9sentation et r\u00e9sultats du d\u00e9fi fouille de texte DEFT2011. quand un article de presse a-t-il\u00e9t\u00e9\u00e9crit ?\u00e0 quel article scientifique correspond ce r\u00e9sum\u00e9 ? In Actes de DEFT, Montpellier, France, 1er juillet. TALN.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Diachronic word embeddings reveal statistical laws of semantic change", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Hamilton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Leskovec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1489--1501", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamilton, W. L., Leskovec, J., and Jurafsky, D. (2016). Diachronic word embeddings reveal statistical laws of semantic change. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguis- tics (Volume 1: Long Papers), pages 1489-1501, Berlin, Germany, August. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Estimating document focus time", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Jatowt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Man Au Yeung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Tanaka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jatowt, A., man Au Yeung, C., and Tanaka, K. (2013). Es- timating document focus time. In CIKM.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Improving temporal language models for determining time of nontimestamped documents", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Kanhabua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "N\u00f8rv\u00e5g", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "5173", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kanhabua, N. and N\u00f8rv\u00e5g, K. (2008). Improving tem- poral language models for determining time of non- timestamped documents. volume 5173, pages 358-370, 09.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Impact of ocr quality on named entity linking", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Linhares Pontes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hamdi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Sidere", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Doucet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Digital Libraries at the Crossroads of Digital Information for the Future", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "102--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linhares Pontes, E., Hamdi, A., Sidere, N., and Doucet, A. (2019). Impact of ocr quality on named entity linking. In Adam Jatowt, et al., editors, Digital Libraries at the Crossroads of Digital Information for the Future, pages 102-115, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Estimating time models for news article excerpts", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Berberich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mishra, A. and Berberich, K. (2016). Estimating time models for news article excerpts. In CIKM.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Temporal text ranking and automatic dating of texts", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Niculae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Ciobanu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "17--21", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Niculae, V., Zampieri, M., Dinu, L., and Ciobanu, A. M. (2014). Temporal text ranking and automatic dating of texts. In Proceedings of the 14th Conference of the Euro- pean Chapter of the Association for Computational Lin- guistics, volume 2: Short Papers, pages 17-21, Gothen- burg, Sweden, April. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Behind the times: Detecting epoch changes using large corpora", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Popescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Popescu, O. and Strapparava, C. (2013). Behind the times: Detecting epoch changes using large corpora. In IJC- NLP.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "ICDAR 2019 Competition on Post-OCR Text Correction", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Rigaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Doucet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Coustaty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-P", |
|
"middle": [], |
|
"last": "Moreux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "15th International Conference on Document Analysis and Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rigaud, C., Doucet, A., Coustaty, M., and Moreux, J.- P. (2019). ICDAR 2019 Competition on Post-OCR Text Correction. In 15th International Conference on Document Analysis and Recognition, Sydney, Australia, September.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Development of a morphological and syntactic lexicon of Old French", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "26\u00e8me Conf\u00e9rence sur le Traitement Automatique des Langues Naturelles (TALN)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sagot, B. (2019). Development of a morphological and syntactic lexicon of Old French. In 26\u00e8me Conf\u00e9rence sur le Traitement Automatique des Langues Naturelles (TALN), Toulouse, France, July.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Ixagroupehudiac: A multiple approach system towards the diachronic evaluation of texts", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Salaberri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Salaberri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Arregi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Zapirain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "840--845", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Salaberri, H., Salaberri, I., Arregi, O., and Zapirain, B. n. (2015). Ixagroupehudiac: A multiple approach system towards the diachronic evaluation of texts. In Proceed- ings of the 9th International Workshop on Semantic Eval- uation (SemEval 2015), pages 840-845, Denver, Col- orado, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Preliminary recommendations on text typology", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sinclair", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sinclair, J. (1996). Preliminary recommendations on text typology. Technical report, EAGLES (Expert Advisory Group on Language Engineering Standards), June.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Stylistic changes for temporal text classification", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Stajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "TSD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stajner, S. and Zampieri, M. (2013). Stylistic changes for temporal text classification. In TSD.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Impact analysis of ocr quality on research tasks in digital archives. SpringerLink", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Traub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Van Ossenbruggen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Hardman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "252--263", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Traub, M. C., van Ossenbruggen, J., and Hardman, L. (2015). Impact analysis of ocr quality on research tasks in digital archives. SpringerLink, pages 252-263, Sep.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"content": "<table><tr><td>|pd \u2212 rd|</td><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td></tr><tr><td colspan=\"2\">SIMILARITY 1</td><td colspan=\"6\">0.97 0.88 0.75 0.60 0.46 0.31 . . .</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"text": "Similarity measure between pd the predicted decade and rd the real decade", |
|
"content": "<table><tr><td colspan=\"3\">N-gram size Decision Tree Random Forest</td></tr><tr><td>1 \u2264 N \u2264 1</td><td>F = 31.62</td><td>F = 35.32</td></tr><tr><td/><td>S = 0.851</td><td>S = 0.877</td></tr><tr><td>1 \u2264 N \u2264 2</td><td>F = 51.23</td><td>F = 58.86</td></tr><tr><td/><td>S = 0.907</td><td>S = 0.931</td></tr><tr><td>1 \u2264 N \u2264 3</td><td>F = 59.49</td><td>F = 66.436</td></tr><tr><td/><td>S = 0.926</td><td>S = 0.947</td></tr><tr><td>1 \u2264 N \u2264 4</td><td>F = 64.6</td><td>F = 71.43</td></tr><tr><td/><td>S = 0.933</td><td>S = 0.950</td></tr><tr><td>1 \u2264 N \u2264 5</td><td>F = 65.1</td><td>F = 69.8</td></tr><tr><td/><td>S = 0.933</td><td>S = 0.945</td></tr><tr><td>2 \u2264 N \u2264 2</td><td>F = 51.17</td><td>F = 58.30</td></tr><tr><td/><td>S = 0.905</td><td>S = 0.928</td></tr><tr><td>2 \u2264 N \u2264 3</td><td>F = 59.94</td><td>F = 67.16</td></tr><tr><td/><td>S = 0.927</td><td>S = 0.948</td></tr><tr><td>2 \u2264 N \u2264 4</td><td>F = 64.06</td><td>F = 70.53</td></tr><tr><td/><td>S = 0.934</td><td>S = 0.948</td></tr><tr><td>2 \u2264 N \u2264 5</td><td>F = 65.00</td><td>F = 70.87</td></tr><tr><td/><td>S = 0.934</td><td>S = 0.948</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"text": "Extract of the results obtained on the GALLICA dataset. Macro F-measure (F) and Similarity (S)", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |