|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:48:56.857050Z" |
|
}, |
|
"title": "Language Resources for Historical Newspapers: the Impresso Collection", |
|
"authors": [ |
|
{ |
|
"first": "Maud", |
|
"middle": [], |
|
"last": "Ehrmann", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Digital Humanities Laboratory", |
|
"institution": "Zurich University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Romanello", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Digital Humanities Laboratory", |
|
"institution": "Zurich University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Clematide", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Digital Humanities Laboratory", |
|
"institution": "Zurich University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Phillip", |
|
"middle": [ |
|
"Benjamin" |
|
], |
|
"last": "Str\u00f6bel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Digital Humanities Laboratory", |
|
"institution": "Zurich University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rapha\u00ebl", |
|
"middle": [], |
|
"last": "Barman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Digital Humanities Laboratory", |
|
"institution": "Zurich University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Following decades of massive digitization, an unprecedented amount of historical document facsimiles can now be retrieved and accessed via cultural heritage online portals. If this represents a huge step forward in terms of preservation and accessibility, the next fundamental challenge-and real promise of digitization-is to exploit the contents of these digital assets, and therefore to adapt and develop appropriate language technologies to search and retrieve information from this 'Big Data of the Past'. Yet, the application of text processing tools on historical documents in general, and historical newspapers in particular, poses new challenges, and crucially requires appropriate language resources. In this context, this paper presents a collection of historical newspaper data sets composed of text and image resources, curated and published within the context of the 'impresso-Media Monitoring of the Past' project. With corpora, benchmarks, semantic annotations and language models in French, German and Luxembourgish covering ca. 200 years, the objective of the impresso resource collection is to contribute to historical language resources, and thereby strengthen the robustness of approaches to non-standard inputs and foster efficient processing of historical documents.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Following decades of massive digitization, an unprecedented amount of historical document facsimiles can now be retrieved and accessed via cultural heritage online portals. If this represents a huge step forward in terms of preservation and accessibility, the next fundamental challenge-and real promise of digitization-is to exploit the contents of these digital assets, and therefore to adapt and develop appropriate language technologies to search and retrieve information from this 'Big Data of the Past'. Yet, the application of text processing tools on historical documents in general, and historical newspapers in particular, poses new challenges, and crucially requires appropriate language resources. In this context, this paper presents a collection of historical newspaper data sets composed of text and image resources, curated and published within the context of the 'impresso-Media Monitoring of the Past' project. With corpora, benchmarks, semantic annotations and language models in French, German and Luxembourgish covering ca. 200 years, the objective of the impresso resource collection is to contribute to historical language resources, and thereby strengthen the robustness of approaches to non-standard inputs and foster efficient processing of historical documents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Digitization efforts are slowly but steadily contributing an increasing amount of facsimiles of cultural heritage documents. As a result, it is nowadays commonplace for many memory institutions to create and maintain digital repositories which offer rapid, time-and location-independent access to documents (or surrogates thereof), allow to virtually bring together disperse collections, and ensure the preservation of fragile documents thanks to on-line consultation (Terras, 2011) . Beyond this great achievement in terms of preservation and accessibility, the next fundamental challenge -and real promise of digitization-is to exploit the contents of these digital assets, and therefore to adapt and develop appropriate language technologies to search and retrieve information from this 'Big Data of the Past' (Kaplan and di Lenardo, 2017) . In this regard, and following decisive grassroots efforts led by libraries to improve OCR (Optical Character Recognition) technology and generalize full-text search over historical document collections (see, e.g., the Impact 1 and Trove 2 projects), the Digital Humanities (DH), Natural Language Processing (NLP) and Computer Vision (CV) communities are pooling forces and expertise to push forward the processing of facsimiles, as well as the extraction, linking and representation of the complex information enclosed in transcriptions of digitized collections. These interdisciplinary efforts were recently streamlined within the far-reaching Europe Time Machine project 3 which ambitions, in general, the application of artificial intelligence technologies on cultural heritage data and, in particular, to achieve text understanding of historical material. This momentum is particularly vivid in the domain of digitized newspaper archives, for which there has been a notable increase of research initiatives over the last years. Besides individual works dedicated to the development of tools (Yang et al., 2011b; Dinarelli and Rosset, 2012; Moreux, 2016; Wevers, 2019) , or to the usage of those tools (Kestemont et al., 2014; Lansdall-Welfare et al., 2017) , events such as evaluation campaigns (Rigaud et al., 2019; Clausner et al., 2019) or hackathons 4 based on digitized newspaper data sets have multiplied. Additionally, several large consortia projects proposing to apply computational methods to historical newspapers at scale have recently emerged, including ViralTexts 5 , Oceanic Exchanges 6 , impresso 7 , NewsEye 8 , and Living with Machines 9 (Ridge et al., 2019) . These efforts are contributing a pioneering set of text and image analysis tools, system architectures, and graphical user interfaces covering several aspects of historical newspaper processing and exploitation. Yet, the application of text processing tools on historical documents in general, and historical newspapers in partic-ular, poses new challenges (Sporleder, 2010; Piotrowski, 2012) . First, the language under study is mostly of earlier stage(s) and usually features significant orthographic variation (Bollmann, 2019) . Second, due to the acquisition process and/or document conservation state, inputs can be extremely noisy, with errors which do not resemble tweet misspellings or speech transcription hesitations for which adapted approaches have already been devised (Linhares Pontes et al., 2019a; Chiron et al., 2017; Smith and Cordell, 2018) . 
Further, and due to the diversity of the material in terms of genre, domain and time period, language resources such as corpora, benchmarks and knowledge bases that can be used for lexical and semantic processing of historical texts are rather sparse and heterogeneous. Finally, archives and texts from the past are not as anglophone as in today's information society, making multilingual resources and processing capacities even more essential (Neudecker and Antonacopoulos, 2016) . Overall, and as demonstrated by Vilain et al. (2007) , the transfer of NLP approaches from one domain or time period to another is not straightforward, and performances of tools initially developed for homogeneous texts of the immediate past are affected when applied on historical material (Ehrmann et al., 2016) . This echoes the statement of Plank (2016), according to whom what is considered as standard or canonical data in NLP (i.e. contemporary news genre) is more a historical coincidence than an objective evidence or reality: non-canonical, heterogeneous, biased and noisy data is more prevalent than is commonly believed, and historical texts are no exception. In this respect, and in light of the above, it can therefore be considered that historical language(s) belong to the family of less-resourced languages for which further efforts are still needed. To help alleviate this deficiency, this paper presents a 'fullstack' historical newspaper data set collection composed of text and image resources produced, curated and published within the context of the 'impresso -Media Monitoring of the Past' project 10 . These resources relates to historical newspaper material in French, German and Luxembourgish and include: OCRed texts together with their related facsimiles and language models, benchmarks for article segmentation, OCR black letter and named entity processing, and multi-layer semantic annotations (named entities, topic modeling and text reuse). The objective of the impresso resource collection is to contribute to historical language resources, and thereby strengthen the robustness of approaches to non-standard inputs and foster efficient processing of historical documents. More precisely, these resources can support:", |
|
"cite_spans": [ |
|
{ |
|
"start": 468, |
|
"end": 482, |
|
"text": "(Terras, 2011)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 842, |
|
"text": "(Kaplan and di Lenardo, 2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1940, |
|
"end": 1960, |
|
"text": "(Yang et al., 2011b;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 1961, |
|
"end": 1988, |
|
"text": "Dinarelli and Rosset, 2012;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1989, |
|
"end": 2002, |
|
"text": "Moreux, 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 2003, |
|
"end": 2016, |
|
"text": "Wevers, 2019)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 2050, |
|
"end": 2074, |
|
"text": "(Kestemont et al., 2014;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 2075, |
|
"end": 2105, |
|
"text": "Lansdall-Welfare et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 2144, |
|
"end": 2165, |
|
"text": "(Rigaud et al., 2019;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 2166, |
|
"end": 2188, |
|
"text": "Clausner et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 2505, |
|
"end": 2525, |
|
"text": "(Ridge et al., 2019)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 2885, |
|
"end": 2902, |
|
"text": "(Sporleder, 2010;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 2903, |
|
"end": 2920, |
|
"text": "Piotrowski, 2012)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 3041, |
|
"end": 3057, |
|
"text": "(Bollmann, 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 3310, |
|
"end": 3341, |
|
"text": "(Linhares Pontes et al., 2019a;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 3342, |
|
"end": 3362, |
|
"text": "Chiron et al., 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 3363, |
|
"end": 3387, |
|
"text": "Smith and Cordell, 2018)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 3835, |
|
"end": 3871, |
|
"text": "(Neudecker and Antonacopoulos, 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 3906, |
|
"end": 3926, |
|
"text": "Vilain et al. (2007)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 4165, |
|
"end": 4187, |
|
"text": "(Ehrmann et al., 2016)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 4996, |
|
"end": 4998, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "(a) NLP research and applications dealing with historical language, with a set of 'ready-to-parse' historical texts covering 150 years in French and German, and a set of language models;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "(b) Model training and performance assessment for three tasks, namely article segmentation, OCR transcription and named entity processing (for the first time on such material for the latter), with manually transcribed and annotated corpora; 10 https://impresso-project.ch (c) Historical corpus exploration and digital history research, with various stand-off semantic annotations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 243, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, the impresso resource collection represents the most complete historical newspapers data set series to date. In the following, we introduce the impresso project (Section 2), present the impresso resource collection (Sections 3, 4 and 5), account for major existing historical language resources (Section 6), and conclude (Section 7).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "2. Mining 200 years of historical newspapers: the impresso project impresso -Media Monitoring of the Past' is an interdisciplinary research project in which a team of computational linguists, designers and historians collaborate on the semantic indexing of a multilingual corpus of digitized historical newspapers 11 . The primary goals of the project are to apply text mining techniques to transform noisy and unstructured textual content into semantically indexed, structured, and linked data; to develop innovative visualization interfaces to enable the seamless exploration of complex and vast amounts of historical data 12 ; to identify needs on the side of historians which may also translate into new text mining applications and new ways to study history; and to reflect on the usage of digital tools in historical sciences from a practical, methodological, and epistemological point of view. In doing so, impresso addresses the challenges posed by large-scale collections of digitized newspapers, namely:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "(1) newspaper silos: due to legal restrictions and digitisation policy constraints, data providers (libraries, archives and publishers) are bound to provide incomplete, nonrepresentative collections which have been subjected to digitization and OCR processing of varying quality; (2) big, messy data: newspaper digital collections are characterised by incompleteness, duplicates, and abundant inconsistencies;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "(3) noisy, historical text: imperfect OCR, faulty article segmentation and lack of appropriate linguistic resources greatly affect image and text mining algorithms' robustness; (4) large and heterogeneous corpora: processing and exploitation requires a solid system architecture and infrastructure, and interface design should favor efficient search and discovery of relevant content; and (5) transparency: critical assessment of inherent biases in exploratory tools, digitized sources and annotations extracted from them is paramount for an informed usage of data in digital scholarship context. With respect to source processing, impresso applies and improve a series of state-of-the-art natural language and image processing components which produce, in fine, a largescale, multilingual, semantically indexed historical newspaper collection. The various lexical and semantic annotations generated thereof are combined and delivered to digital scholars via a co-designed, innovative and powerful graphical user interface. Furthermore, and this is the focus of the present paper, those sources and annotations are also published apart from the interface for further usage by cultural heritage partners, and DH and/or NLP communities. Finally, some of the text and image mining components are subject to systematic evaluation, for which ground truth data are produced. All publicly released impresso resources, i.e. corpora, benchmarks and annotations, are published on the project's website 13 and on impresso zenodo community 14 with detailed documentation. Table 2 summarizes the links and DOIs of the datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1560, |
|
"end": 1567, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The first resource is a set of normalized, 'ready-to-process' newspaper textual corpora which, for copyrights reasons, do not correspond to the full impresso newspaper collection accessible through the interface.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impresso Corpora", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "impresso gathers a consortium of Swiss and Luxembourgish research and cultural heritage institutions and focuses primarily on sources of these countries in French, German, and Luxembourgish. Provided by its partners, 15 impresso original sources correspond as of November 2019 to 76 newspapers. Concretely speaking, sources consist of either both OCR output and images, or only OCR. Regarding images, they are thus either served online via the IIIF Image API 16 of the impresso infrastructure, or accessed directly via the data provider's IIIF endpoint . Text and layout acquisition outputs (i.e. OCR and OLR) come, for their part, in a variety of METS/ALTO format flavors, sometimes complemented by proprietary formats of private service providers. Overall, the current collection amounts to ca. 77TB, text and image combined. More newspaper titles in French and English will be acquired and ingested during the last year of the project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original Sources", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "Original sources are subject to copyright law and impresso has received permission from its partners to use them, provided that legal terms of use are respected upon online access and/or download. More specifically, digital documents are subject to two different right statements: (1) public domain, or unrestricted: documents are no longer in copyright and may be used without restriction for all purposes, including commercial; (2) academic use, or restricted: documents are still under copyright and their use is restricted to personal and/or academic purposes, with the possibility to download the text or not. The present impresso corpus release includes unrestricted documents and a part of restricted ones (for personal and academic usage). Depending on negotiations with data providers and on the inclusion of new collections, the situation is very likely to evolve in the future and impresso original source release will be complemented.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Legal Framework", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "The original files provided by our partners encode the structure and the text of digital objects according to METS/ALTO XML library standards. METS (Metadata Encoding and Transmission Standard 17 ) encodes various metadata as well as information on the physical and logical structure of the object, while ALTO (Analyzed Layout and Text Object 18 ) represents information of OCR recognized texts, i.e. describes text and layout information (coordinates of columns, lines and words on a page). While very precise and complete, these XML files contain more information than necessary in a text mining context, and are cumbersome to process. Moreover, METS and ALTO schemas are flexible and libraries usually adapts them according to their text acquisition capacities, resulting in a variety of input variants. Combined with the existence of different file hierarchies, source identifiers and image mappings, as well as other OCR/OLR proprietary formats, these inputs require, to say the least, a great deal of processing before they can finally be parsed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Source Processing", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "To this end, each library input is converted into 'canonical' files where information is encoded according to impresso JSON schemas, 19 from which 'ready-to-process' files can easily be derived. Defined iteratively and shared with other newspaper projects, these JSON schemas act as a central, common format which a) allows the seamless processing of various data sources; b) preserves the information necessary for NLP processing and interface rendering only; and c) drastically reduces file sizes, thereby allowing easier processing in distributed environments. Schemas and converters are published and documented online and are not described further here. An important point to mention, though, is that we mint and assign unique, canonical identifiers to newspaper issues, pages as well as content items (i.e. newspaper contents below the page level such as articles, advertisements, images, tables, weather forecasts, obituaries, etc.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Source Processing", |
|
"sec_num": "3.3." |
|
}, |
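
{

"text": "For illustration only, a canonical content item identifier can be thought of as the concatenation of a newspaper acronym, the issue date, an edition letter and an item number. The minimal Python sketch below assumes this shape and invented helper names; the normative definition of identifiers lives in the published impresso JSON schemas:\n\nfrom datetime import date\n\ndef content_item_id(newspaper: str, d: date, edition: str, n: int) -> str:\n    # Hypothetical ID scheme, e.g. content_item_id('GDL', date(1900, 1, 2), 'a', 1)\n    # -> 'GDL-1900-01-02-a-i0001'.\n    return f'{newspaper}-{d:%Y-%m-%d}-{edition}-i{n:04d}'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Source Processing",

"sec_num": "3.3."

},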
|
{ |
|
"text": "The impresso corpora are released in two versions, both distributed as compressed archives (bzip2) of data in newline-delimited JSON format: 1) the 'canonical' version, with a fine-grained logical and physical representation of newspaper contents, including image coordinates and 2) the 'ready-to-process' version, which offer 'reconstructed' content item full texts, that is to say continuous strings non divided by OCR token units. This reconstruction significantly reduces the overhead when parsing the entire dataset, which amount to 145GB compressed (restricted and unrestricted).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Release", |
|
"sec_num": "3.4." |
|
}, |
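
{

"text": "As a minimal sketch of the 'reconstruction' step described above (the token field names 'tx' and 'hy' are assumptions made for illustration; the actual token representation is defined in the impresso JSON schemas):\n\ndef reconstruct(tokens):\n    # tokens: list of dicts with 'tx' (surface form) and an optional\n    # 'hy' flag marking a token hyphenated across a line break.\n    parts = []\n    for tok in tokens:\n        if tok.get('hy'):\n            # Join the two halves of a hyphenated word without a space.\n            parts.append(tok['tx'].rstrip('-'))\n        else:\n            parts.append(tok['tx'] + ' ')\n    return ''.join(parts).strip()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Release",

"sec_num": "3.4."

},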
|
{ |
|
"text": "The impresso corpus currently contains 76 newspapers: 50 from Switzerland and 26 from Luxembourg. AS mentioned previously, contents are subject to different license regimens, depending on the permissions given by cultural heritage institutions and rights holders. In Table 1 we provide some basic statistics about our corpora, divided by license type. The release will contain all contents in the public domain (unrestricted), as well as those available for academic use and for which the text can be downloaded (restricted with download, negotiations ongoing). The released corpora amount to almost 10 billion tokens of textual contents, covering a time span of more than 200 years (see Fig. 1 ), and contain roughly 3 million images.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 267, |
|
"end": 274, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 688, |
|
"end": 694, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Release", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "Contextual information about digital collections is essential and we attempt to provide as much information as possible, even though this is neither the core expertise nor part of the main objectives of the project. Impresso newspaper metadata corresponds to descriptive (e.g. title, dates, place of publication), structural (issue, page, content items), and administrative metadata (file timestamps, file creator, preservation metadata). These metadata were given by cultural institutions and, most of the time, completed by the impresso team (either technical or descriptive metadata). Since this metadata set does not intend to replace library professional information but is rather meant for statistical 'data science' purposes, each record contains links to authority information such as the original bibliographic notice and the library portal. Impresso newspaper metadata is encoded in JSON format, covers all newspapers and is published under a CC-BY 4.0 license. 20", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metadata", |
|
"sec_num": "3.5." |
|
}, |
|
{ |
|
"text": "In order to support the training and evaluation of some processing components, several benchmarks were produced. They include material from both restricted and unrestricted collections, for which right clearance has been achieved. All are released under open licenses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impresso Benchmarks", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "Exploration and automatic processing of digitized newspaper sources is greatly hindered by the sometimes low qual-20 https://creativecommons.org/licenses/by/ 4.0/ ity of legacy OCR and OLR (when present) processes: content items are incorrectly transcribed and incorrectly segmented. In an effort to address these shortcomings, impresso developed an approach for content item recognition and classification exploiting both textual and visual features (Barman et al., 2020) . The objectives were, on the one hand, to filter out noisy or unwanted material before the application of subsequent NLP processes (e.g. removing all meteo tables and title banners before running topic modeling or text re-use) and, on the other hand, to allow faceted search on content item types (e.g. search \"", |
|
"cite_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 472, |
|
"text": "(Barman et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Article Segmentation Ground Truth", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "xyz\" in type of items 'editorials').", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Article Segmentation Ground Truth", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "To this end, a set of newspaper images was manually annotated and several experiments were conducted (Barman, 2019). Although newspaper content items can be of many types, 21 we choose to focus on four classes that were deemed suitable for developing a first prototype, as well as meaningful within the impresso context, as follows: Three newspapers from the French speaking part of Switzerland covering a period of ca. 200 years (1798-2017) were considered for the annotation. 22 To obtain a diachronic ground truth, three issues were sampled every three or five years for the whole duration of each newspaper. The sampled images were annotated using the VGG Image Annotator v.2.0.8 (VIA), a simple web interface for annotating images with annotation export in JSON format (Dutta and Zisserman, 2019) . Concretely speaking, each annotated image is associated with the list of its regions (i.e. coordinates) and their corresponding labels. Overall, Figure 1 : Distribution of tokens over years (whitespace tokenization was applied).", |
|
"cite_spans": [ |
|
{ |
|
"start": 478, |
|
"end": 480, |
|
"text": "22", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 801, |
|
"text": "(Dutta and Zisserman, 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 949, |
|
"end": 957, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Article Segmentation Ground Truth", |
|
"sec_num": "4.1." |
|
}, |
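
{

"text": "A minimal sketch of reading such a VIA export in Python (the keys follow VIA 2.x export conventions as we understand them and should be checked against the VIA documentation; the file name is hypothetical):\n\nimport json\n\nwith open('via_export.json') as f:\n    annotations = json.load(f)\n\nfor img in annotations.values():\n    for region in img.get('regions', []):\n        shape = region['shape_attributes']   # region geometry, e.g. x, y, width, height\n        label = region['region_attributes']  # the class label assigned by the annotator\n        print(img['filename'], shape, label)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Article Segmentation Ground Truth",

"sec_num": "4.1."

},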
|
{ |
|
"text": "4624 page scans were annotated -among which 1208 with at least one annotation -, amounting to 2773 annotated regions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Article Segmentation Ground Truth", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "Work is ongoing and once models would have reached a satisfying level of precision, they will be applied on the whole collection to filter out elements before text processing and enable faceted search over content item types. This article segmentation data set (annotations and images) is published under a CC-BY-SA 4.0 license, using VIA as well as the standard object annotation COCO 23 (Lin et al., 2014) formats.", |
|
"cite_spans": [ |
|
{ |
|
"start": 389, |
|
"end": 407, |
|
"text": "(Lin et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Article Segmentation Ground Truth", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "We created a publicly available ground truth (i.e., a manually corrected version of text) for black letter newspaper print for the assessment of the OCR quality of the German-language Neue Z\u00fcrcher Zeitung (NZZ) (Str\u00f6bel, Phillip Benjamin and Clematide, Simon, 2019) . We sampled one front page per year for the long period the NZZ has been published in black letter (1780 -1947) , resulting in a diachronic ground truth of 167 pages. We used the Transkribus 24 tool do complete the annotations. We published the ground truth as tiff images and corresponding XML files 25 . First experiments on improving the OCR for this kind of data showed that elaborated deep learning models (Weidemann et al., 2018) reach character accuracies of 99.52% and that they are transferable to other newspaper data and to better images than present in the ground truth .", |
|
"cite_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 265, |
|
"text": "(Str\u00f6bel, Phillip Benjamin and Clematide, Simon, 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 378, |
|
"text": "(1780 -1947)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 568, |
|
"end": 570, |
|
"text": "25", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 678, |
|
"end": 702, |
|
"text": "(Weidemann et al., 2018)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Black Letter OCR Ground Truth", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "After image segmentation and transcription, the last impresso benchmark relates to an information extraction task, named entity (NE) processing. NE processing tools are increasingly being used in the context of historical documents and research activities in this domain target texts of different nature (e.g. museum records, state-related documents, genealogical data, historical newspapers) and different tasks (NE recognition and classification, entity linking, or both). Experiments involve different time periods, focus on different domains, and use different typologies. This great diversity demonstrates how many and varied the needs -and the challenges-are, but also makes performance comparison difficult, if not impossible. In this context, the impresso project organises a CLEF 2020 Evaluation Lab, named 'HIPE' (Identifying Historical People, Places and other Entities) (Ehrmann et al., 2020) . 26 The HIPE shared task puts forward two NE processing tasks, namely: (1) the named entity recognition and classification (NERC) task, with two sub-tasks of increasing level of difficulty with high-level vs. finer-grained entity types, and (2) the named entity linking task. The HIPE corpus is composed of content items from the impresso Swiss and Luxembourgish newspapers, as well as from American newspapers, on a diachronic basis. 27 For each language, articles of four different newspapers were sampled on a decade time-bucket basis, according to the time span of the newspaper (longest duration spans ca. 200 years). More precisely, articles were first randomly sampled from each year of the considered decades, with the constraints of having a title and more than 100 characters. Subsequently to this sampling, a manual triage was applied in order to keep journalistic content only and to remove undesirable items such as feuilleton, cross-words, weather tables, time-schedules, obituaries, and what a human could not even read because of OCR noise. This material was manually annotated according to HIPE annotation guidelines, derived from the Quaero annotation guide. 28 Originally designed for the annotation of 'ex-", |
|
"cite_spans": [ |
|
{ |
|
"start": 882, |
|
"end": 904, |
|
"text": "(Ehrmann et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 907, |
|
"end": 909, |
|
"text": "26", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1341, |
|
"end": 1343, |
|
"text": "27", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2083, |
|
"end": 2085, |
|
"text": "28", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Processing Ground Truth", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "26 See CLEF 2020: https://clef2020.clef-initiative.eu and HIPE: https://impresso.github.io/CLEF-HIPE-2020", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Processing Ground Truth", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "27 From the Swiss National Library, the Luxembourgish National Library, and the Library of Congress, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Processing Ground Truth", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "28 See the original Quaero guidelines: http://www.quaero.org/media/files/bibliographie/quaero-guideannotation-2011.pdf tended' named entities (i.e. more than the 3 or 4 traditional entity classes) in French speech transcriptions, Quaero guidelines have furthermore been used on historic press corpora (Rosset et al., 2012) . HIPE slightly recast and simplifies them, considering only a subset of entity types and components, as well as of linguistic units eligible as named entities 29 . The annotation campaign was carried out by the task organizers with the support of trilingual collaborators. We used INCEpTION as an annotation tool (Klie et al., 2018) , with the visualisation of image segments alongside OCR transcriptions. For each language, a sub-sample of the corpus was annotated by two annotators and inter-annotator agreement is computed, before and after an adjudication. As of March 2020, 21000 top-level entity mentions were annotated and linked to Wikidata. For each task and language the corpus is divided into training, dev and test data sets, with the only exception of English for which only dev and test are produced. These manually annotated materials are released in IOB format with hierarchical information. Even though many evaluation campaigns on NE were organized over the last decades, 30 only one considered French historical texts (Galibert et al., 2012) and, to the best of our knowledge, this is the first multilingual, diachronic named entity-annotated historical corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 301, |
|
"end": 322, |
|
"text": "(Rosset et al., 2012)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 485, |
|
"text": "29", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 637, |
|
"end": 656, |
|
"text": "(Klie et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1361, |
|
"end": 1384, |
|
"text": "(Galibert et al., 2012)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Processing Ground Truth", |
|
"sec_num": "4.3." |
|
}, |
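
{

"text": "For illustration, IOB-formatted material looks roughly as follows (an invented example, not a sentence from the actual HIPE corpus; the real release carries additional columns for fine-grained types, components and Wikidata links):\n\nLe      O\ng\u00e9n\u00e9ral O\nGuisan  B-pers\nparle   O\n\u00e0       O\nBerne   B-loc\n.       O",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Named Entity Processing Ground Truth",

"sec_num": "4.3."

},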
|
{ |
|
"text": "Finally, a wealth of annotations as well as language models are automatically computed over the whole impresso collection. They include: at lexical level, linguistic preprocessing (lemmatisation and historical spelling normalization), word embeddings, OCR quality assessment and n-grams; at referential level, NE mentions and linked entities; at conceptual level, topics, topic models, and topicannotated content items; at collection level, text reuse clusters and passages; and, finally, visual signatures of photographs and pictures contained in newspapers. These enrichments of our content items are represented as stand-off annotations and are released under CC-BY or CC-BY-SA 4.0 license. However, not all annotation data sets are fully ready at the moment; the following sections present those which are part of the current release.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impresso Lexical and Semantic Annotations", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "In order to automatically assess the loss of information due to OCR noise, we compute a simple OCR quality measure inspired by spell-checker approach of Alex and Burns (2014). In our case, it basically corresponds to the proportion of words of an historical newspaper article that can be found in the Wikipedia corpus of the corresponding language. Given the multilingual nature of our texts and the large number of names in newspapers, this offers a practical approach, especially for German where normal nouns and proper nouns are capitalized. Before actually comparing the words, we normalise diacritical marks the same way as our text retrieval system Solr does before indexing the content. Therefore, for instance, we consider the frequently occurring OCR errors B\u00e4le or B\u00e0le as equivalent to the correct spelling of the town B\u00e2le, because they are all normalized to the same string bale. The reason for this normalisation approach in OCR assessment is that we want to inform our impresso users about the real loss of recall they should expect when actually running standard keyword queries over our text collection (B\u00e4le will be found even is the user search for B\u00e0le, but B\u00e2te would not return any result, and this is the loss we want to account for). The OCR quality assessment is a number between 0 and 1 that is distributed along with our data as stand-off annotation for each content item. Impresso interface users will probably quickly grasp the meaning of the numbers by just being exposed to texts and their corresponding OCR quality assessment, and learn to interpret them with respect to the type of article, e.g. stock market prices with many abbreviations that will lower the score. As our approach is unsupervised, we need to formally evaluate it similar to Alex and Burns (2014) by testing whether there is a reasonable correlation between the automatically computed quality and some ground truth character error rate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "OCR quality assessment", |
|
"sec_num": "5.1." |
|
}, |
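
{

"text": "For illustration, a minimal Python sketch of such a dictionary-based quality measure (hypothetical function and variable names; the actual impresso implementation, its Wikipedia-derived word lists and its exact Solr normalisation rules are not reproduced here):\n\nimport unicodedata\n\ndef normalise(token):\n    # Strip diacritics, mirroring Solr's folding: 'B\u00e4le' -> 'bale'.\n    nfkd = unicodedata.normalize('NFKD', token.lower())\n    return ''.join(c for c in nfkd if not unicodedata.combining(c))\n\ndef ocr_quality(tokens, wiki_vocab):\n    # wiki_vocab: set of normalised word forms from the Wikipedia corpus\n    # of the article's language. Returns a score between 0 and 1.\n    words = [normalise(t) for t in tokens if t.isalpha()]\n    if not words:\n        return 0.0\n    known = sum(1 for w in words if w in wiki_vocab)\n    return known / len(words)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "OCR quality assessment",

"sec_num": "5.1."

},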
|
{ |
|
"text": "As mentioned earlier, the full impresso collection cannot be distributed due to copyright restrictions. Having the material at hand, however, allows us to compute historical newspapers genre-specific lexical resources such as word embeddings that can be distributed to the public. Specifically, we build classical type-level word embeddings with fasttext 31 . This choice is motivated by fasttext's support for subword modeling (Bojanowski et al., 2016) , which is a useful feature in the presence of OCR errors. There has been recent work on top of fasttext for bringing the embeddings of misspelled words even closer to the correct versions via supervised training material (Piktus et al., 2019) . Well-known drawbacks of type-level word embeddings are that (a) they enforce their users to adhere to the same tokenisation rules that their producers applied and, more severely, (b) they cannot differentiate the meanings of ambiguous words, or words that change their meaning in certain constructions. The simple character-based approach proposed by Akbik et al. (2018) (\"contextualized string embeddings\" 32 ) has successfully tackled these two problems and led to excellent results for NER. Our own experiments with NER on noisy French historical newspapers additionally proved the resilience of these embeddings trained on in-domain material to OCR errors (Bircher, 2019) . Within the impresso interface, word embeddings are mainly used for suggesting similar words in the keyword search (including cross-lingual), thereby supporting query expansion by semantic or OCR noise variants. Query expansion is also offered for the lexical n-gram viewers. Two types of word embeddings derived from the impresso text material are published: Character-based contextualized string embeddings and classical type-level word embeddings with subword information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 428, |
|
"end": 453, |
|
"text": "(Bojanowski et al., 2016)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 697, |
|
"text": "(Piktus et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1051, |
|
"end": 1070, |
|
"text": "Akbik et al. (2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1360, |
|
"end": 1375, |
|
"text": "(Bircher, 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "5.2." |
|
}, |
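
{

"text": "As a minimal sketch, type-level embeddings with subword information can be trained on the reconstructed article texts roughly as follows (hypothetical file names and parameter values; the exact impresso training settings are not specified here):\n\nimport fasttext\n\n# Input: one preprocessed article per line, e.g. derived from the\n# 'ready-to-process' corpus release (hypothetical file name).\nmodel = fasttext.train_unsupervised(\n    'impresso_fr.txt',\n    model='skipgram',\n    dim=300,\n    minn=3,  # character n-gram range used for subword modeling\n    maxn=6,\n)\nmodel.save_model('impresso_fr.bin')\n\n# Subword modeling lets OCR variants land near the correct form:\nprint(model.get_nearest_neighbors('bale'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Embeddings",

"sec_num": "5.2."

},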
|
{ |
|
"text": "The impresso web application supports faceted search with respect to language-specific topics (French, German, Luxembourgish). We use the well-known MALLET 33 toolkit, which allows the training and inference of topic models with Latent Dirichlet Allocation (Blei et al., 2003) . First, linguistic preprocessing is applied to the data. For POS tagging, the spaCy 34 library is used because of its robustness in the presence of OCR noise. However, spaCy lemmatization is not always very satisfactory and further analyzers and sources are used to complement its results. For German, we rely mostly on the broad-coverage morphological analyser GERTWOL 35 , and are currently working on the problem of lemmatization of words with historical spelling and/or OCR errors (see Jurish (2012) for earlier work based on finite-state approaches for German). For French, we use the full-form lexicon Morphalou 36 (ATILF, 2019) to complete lemma information not provided by spaCy. Dealing with the low-resourced Luxembourgish language is more difficult (although spaCy now has PoS tagging support for this language), mostly because of many spelling variants and reforms this language has seen over the last 150 years. Then, under the assumption that topics are more interpretable if they consist of nouns and proper nouns only, we reduce the corpus size by excluding all other parts of speech based on the information obtained from spaCy. As an additional benefit, this filtering drastically reduces the number of tokens of the corpus that topic modeling has to deal with. Next, topics are computed on this reduced, preprocessed material. Although the German part of the collection is of reasonable size, the French material is however still too big for MALLET and sampling of articles containing at least 10 nouns and/or proper nouns is applied. In order to keep the facets for topic search manageable and interpretable, and at the same time account for the diversity of contents found in newspapers, we set the number of topics for German and French to 100. For the French topics, we directly fit topic distributions for about a third of our overall data. Topic inference with the model trained on the sample is used for the remaining articles. Topic inference also solves the problem that our collections is continuously growing, and recomputing topic models from scratch each time is not feasible. Additionally, historians prefer to have semantically stable topic models for their work. Therefore, we also apply topic inference on newly added German texts. Topic models, as well as topics and content item topic assignments are released in JSON format. 37 Topics are also available within the impresso web interface, where they (a) serve as search facets, i.e., users can restrict their search results to articles containing only certain topics; or (b) the 33 http://mallet.cs.umass.edu 34 https://spacy.io/ 35 http://www2.lingsoft.fi/doc/gertwol 36 http://www.cnrtl.fr/lexiques/morphalou 37 Also documented online at https://github.com/impresso/impresso-schemas users can select topics as entry points to explore the topic modeling based soft-clustering of articles over the entire corpus; or (c) they provide the basis for an article recommender system based on topic distribution similarity. Future work will focus on the evolution of topics over time and cross-lingual topic modeling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 276, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 2642, |
|
"end": 2644, |
|
"text": "37", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topic Models", |
|
"sec_num": "5.3." |
|
}, |
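
{

"text": "A minimal sketch of the noun-filtering step described above (the spaCy model name is a hypothetical choice; the actual impresso pipeline, its lexicon lookups and its MALLET invocation are not reproduced here):\n\nimport spacy\n\nnlp = spacy.load('fr_core_news_sm')\n\ndef nouns_only(text):\n    # Keep only nouns and proper nouns, lemmatised, as input for MALLET.\n    doc = nlp(text)\n    return ' '.join(tok.lemma_ for tok in doc if tok.pos_ in ('NOUN', 'PROPN'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Topic Models",

"sec_num": "5.3."

},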
|
{ |
|
"text": "Text reuse can be defined as the meaningful reiteration of text beyond the simple repetition of common language. It is such a broad concept that it can be understood at different levels and studied in a large variety of contexts. In a publishing or teaching context, plagiarism can be seen as text re-use, should portions of someone else's text be repeated without appropriate attribution. In the context of literary studies, text re-use is often used as a synonym for literary phenomena like allusions, paraphrases and direct quotations. Text reuse is a very common phenomenon in historical newspapers too. Nearly-identical articles may be repurposed in multiple newspapers as they stem from the very same press release. In newspapers from the period before the advent of press agencies, text reuse instances can be interesting to study the dynamics of information spreading, especially when newspapers in the same language but from different countries are considered. In more recent newspapers text reuse is very frequent due to cut-and-paste journalism being an increasingly common practice. We used passim 38 (Smith et al., 2015) to perform the automatic detection of text reuse. Passim is an open source software that uses n-grams to effectively search for alignment candidates, the Smith-Waterman algorithm to perform the local alignment of candidate document pairs, and singlelink clustering to group similar passages into text reuse clusters. As a pre-processing step we used passim to identify boilerplate within our corpus. This step allows us to reduce the input size of approximately 10%, by removing mostly short passages that are repeated within the same newspaper within a time window of 30 days. We then run passim on the entire corpus after boilerplate passages have been removed: passim outputs all text passages that were identified as belonging to a text reuse cluster. As opposed to boilerplate detection, text reuse detection explicitly targets reuse instances across two or more sources (i.e. newspapers). We post-process passim's output to add the following information:", |
|
"cite_spans": [ |
|
{ |
|
"start": 1113, |
|
"end": 1133, |
|
"text": "(Smith et al., 2015)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Reuse", |
|
"sec_num": "5.4." |
|
}, |
|
{ |
|
"text": "\u2022 size, i.e. the number of text passages in the cluster;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Reuse", |
|
"sec_num": "5.4." |
|
}, |
|
{ |
|
"text": "\u2022 lexical overlap, expressed as the proportion of unique tokens shared by all passages in a cluster;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Reuse", |
|
"sec_num": "5.4." |
|
}, |
|
{ |
|
"text": "\u2022 time delta: the overall time window covered by a given cluster (expressed in number of days);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Reuse", |
|
"sec_num": "5.4." |
|
}, |
|
{ |
|
"text": "\u2022 time gap: following Salmi et al. (2019), we compute the longest gap (expressed in number of days) between the publication of any two passages in a cluster. Table 2 : Impresso datasets DOIs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 165, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Text Reuse", |
|
"sec_num": "5.4." |
|
}, |
|
{ |
|
"text": "This information is added to each text reuse cluster with the goal of easing the retrieval as well as the analysis of detected text reuse. Since passim detects several million clusters in the entire impresso corpus, we need to further characterize each cluster if we want to enable historians to find instances of text reuse that are of interest to them. Each of these additional dimensions characterizes a certain aspects of reuse: lexical overlap allows for distinguishing almost exact copies of a piece of news from re-phrasings or paraphrases; time delta is an indicator of the longevity of a given piece of news; and, finally, time gap captures the viral nature of news spreading, especially its pace of publication. We release as a resource (in JSON format) the boilerplate and text reuse passages as detected by passim, as well as the additional information we compute at cluster-level. This data can be used to filter out duplicates from the input corpus, given the detrimental effects that such duplicates have on semantic models (e.g. topics, word embeddings) (Schofield et al., 2017) . Text reuse information is currently used in the impresso interface as an additional navigation aid, as it points users to existing reuses of the news article in focus. Future upgrades of the interface will include a dedicated text reuse explorer, which will allow users to search over and browse through all text reuse clusters, and to filter them based on several criteria (i.e. size, lexical overlap, time gap, time delta).", |
|
"cite_spans": [ |
|
{ |
|
"start": 1070, |
|
"end": 1094, |
|
"text": "(Schofield et al., 2017)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Reuse", |
|
"sec_num": "5.4." |
|
}, |
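
{

"text": "A minimal sketch of how these cluster-level statistics can be computed from passim's output (the field names 'date' and 'tokens' are assumptions made for illustration; passim's actual output schema should be checked against its documentation):\n\ndef cluster_stats(passages):\n    # passages: list of dicts, one per passage in a text reuse cluster,\n    # with 'date' (datetime.date) and 'tokens' (set of str).\n    dates = sorted(p['date'] for p in passages)\n    token_sets = [p['tokens'] for p in passages]\n    shared = set.intersection(*token_sets)\n    union = set.union(*token_sets)\n    gaps = [(b - a).days for a, b in zip(dates, dates[1:])]\n    return {\n        'size': len(passages),\n        'lexical_overlap': len(shared) / len(union) if union else 0.0,\n        'time_delta': (dates[-1] - dates[0]).days,\n        'time_gap': max(gaps) if gaps else 0,\n    }",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Text Reuse",

"sec_num": "5.4."

},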
|
{ |
|
"text": "This section briefly summarizes previous efforts with respect to historical language resources. We focus here on historical newspapers and refer the reader to Sporleder (2010) and Piotrowski (2012) for further information on historical language in general. Digitized newspaper corpora, understood here as consisting of both images and OCR, primarily exist thanks to the considerable efforts of national libraries, either as individual institutions, either as part of consortia, e.g. the Europeana Newspaper project (Neudecker and Antonacopoulos, 2016) . Those institutions are the custodians of these digital assets which, after having been hidden behind digital library portals for long, are now increasingly making their way to the public via APIs and/or data dumps (e.g. the French National Library APIs 39 and the National Library of Luxembourg open data portal 40 ). Impresso corpora are by no means meant to compete with these repositories, but rather to complement them, with derived, working 'secondary' versions of the material in a form that is suitable for NLP needs. To our knowledge, and since corpus preparation is often done by private companies mandated to develop digital portals, no 'ready-to-process' set of historical newspaper corpus such as the impresso one exists. Several instances of OCR and article segmentation benchmarks exists thanks to, among others, the long-standing series of conference and shared tasks organized by the document analysis community 41 impresso annotated data sets are, in this regard, not new but complementary: German Black Letter ground truth is not common and, given the variety of historical newspaper material, article segmentation over page scans of different sources is beneficial. With respect to word embeddings, the companion website 42 of Hamilton et al. (2016) provides word2vec embeddings for French and German derived from Google ngrams. More recently, Riedl (2019) released German word embedding data sets derived from historical newspapers. In the last years, a few gold standards were publicly released for named entities: Galibert et al. (2012) shared a French named entity annotated corpus of historical newspapers from the end of the 19 th century and Neudecker (2016) published four data sets of 100 pages each for Dutch, French, and German (including Austrian) as part of the Europeana Newspapers project. Besides, Linhares Pontes et al. (2019b) have recently published a data set for the evaluation of NE linking where various types of OCR noise were introduced. In comparison, the HIPE corpus has a broader temporal coverage and additionally covers English. Regarding topic modeling, Yang et al. (2011a) gives an overview of earlier work on historical newspapers. Finally, as far as text reuse is concerned, very few resources and/or benchmarks were published to date. Franzini et al. (2018) have published a ground truth dataset to benchmark the detection of a specific type of text reuse (i.e. literary quotations). The Viral Texts project has published an online interface, the Viral Texts Explorer 43 , which makes searchable and explorable text reuse clusters extracted from 19th century newspapers. A similar online interface was provided also by Salmi et al. (2019) for 13 million text reuse clusters extracted from Finnish press (1771-1920).", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 197, |
|
"text": "Piotrowski (2012)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 551, |
|
"text": "(Neudecker and Antonacopoulos, 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1800, |
|
"end": 1822, |
|
"text": "Hamilton et al. (2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1917, |
|
"end": 1929, |
|
"text": "Riedl (2019)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 2090, |
|
"end": 2112, |
|
"text": "Galibert et al. (2012)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 2222, |
|
"end": 2238, |
|
"text": "Neudecker (2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 2658, |
|
"end": 2677, |
|
"text": "Yang et al. (2011a)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 2843, |
|
"end": 2865, |
|
"text": "Franzini et al. (2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 3227, |
|
"end": 3246, |
|
"text": "Salmi et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "We have presented a series of historical newspaper datasets -the impresso resource collection -composed of corpora, benchmarks, semantic annotations and language models in French, German, Luxembourgish and English covering ca. 200 years. Produced in the context of a collaborative, interdisciplinary project which aims at enabling critical text mining of 200 years of newspapers, this collection includes different types of resources that could support the needs of several communities. The textual corpora we release are large-scale, diachronic, multilingual and with real-world OCR quality. Their availability will foster further research on NLP methods applied to historical texts (e.g. OCR postcorrection, semantic drift, named entity processing). Similarly, our benchmarks will fill an important gap in the adaptation of existing approaches via e.g. transfer learning, as well as enable performance assessment and comparisons. Language models will naturally find their use in many applications, while lexical and semantic annotations will support historical corpus exploration and be suitable for use at public participatory events such as hackathons. As future work we attempt to integrate more textual material (French and English notably), to release additional annotations (image visual signatures, historical n-grams and named entities) and to serialize our data in more formats in addition to JSON.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Perspectives", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "http://www.impact-project.eu 2 https://trove.nla.gov.au 3 https://www.timemachine.eu", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The project is funded by the Swiss National Science Foundation for a period of three years (2017-2020) and involves three main applicants: DHLAB from the Ecole polytechnique f\u00e9d\u00e9rale de Lausanne (EPFL), ICL from the University of Zurich, and C 2 DH from the University of Luxembourg.12 https://impresso-project.ch/app/#", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://impresso-project.ch/project/datasets 14 https://zenodo.org/communities/impresso 15 Namely: the Swiss National Library, the National Library of Luxembourg, the Media Center and State Archives of Valais, the Swiss Economic Archives, the journal Le Temps (Ringier group), the journal Neue Z\u00fcrcher Zeitung, and other local and international data providers.16 Defined by the International Image Interoperability Framework, an interoperable technology and community framework for image delivery: https://iiif.io", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.loc.gov/standards/mets 18 https://www.loc.gov/standards/alto 19 https://github.com/impresso/impresso-schemas", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There is little to no agreement among historians and/or librarians about a 'base' newspaper content items taxonomy.22 The Gazette de Lausanne, the Impartial and the Journal de Gen\u00e8ve.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://cocodataset.org/#format-data 24 https://transkribus.eu/Transkribus 25 https://github.com/impresso/NZZ-black-letter-ground-truth", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://fasttext.cc 32 https://github.com/zalandoresearch/flair", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/dasmiq/passim", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://api.bnf.fr 40 https://data.bnl.lu/data/historical-newspapers41 In particular the ICDAR conferences, e.g. http://icdar2019.org/. 42 https://nlp.stanford.edu/projects/histwords", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We warmly thank the impresso team as well as student collaborators Camille Watter, Stefan Bircher, Julien Nguyen Dang for their annotation work. Authors also gratefully acknowledge the financial support of the Swiss National Science Foundation (SNSF) for the project impresso -Media Monitoring of the Past under grant number CR-SII5 173719.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "8." |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Contextual string embeddings for sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Akbik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Blythe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Vollgraf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1638--1649", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akbik, A., Blythe, D., and Vollgraf, R. (2018). Contextual string embeddings for sequence labeling. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1638-1649, Santa Fe, New Mexico, USA, August. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Estimating and rating the quality of optically character recognised text", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Burns", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "97--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex, B. and Burns, J. (2014). Estimating and rating the quality of optically character recognised text. pages 97- 102. ACM Press.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Morphalou. ORTOLANG (Open Resources and TOols for LANGuage", |
|
"authors": [ |
|
{ |
|
"first": "Atilf", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "ATILF. (2019). Morphalou. ORTOLANG (Open Re- sources and TOols for LANGuage) -www.ortolang.fr. 43 https://viraltexts.northeastern.edu/clusters", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Combining Visual and Textual Features for Semantic Segmentation of Historical Newspapers (submitted)", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Barman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Clematide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Oliveira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barman, R., Ehrmann, M., Clematide, S., Oliveira, S. A., and Kaplan, F. (2020). Combining Visual and Textual Features for Semantic Segmentation of Historical News- papers (submitted). Journal of Data Mining and Digital Humanities. https://arxiv.org/abs/2002.06144.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Historical newspaper semantic segmentation using visual and textual features", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Barman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barman, R. (2019). Historical newspaper semantic seg- mentation using visual and textual features. Master the- sis, EPFL.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Toulouse and Cahors refer to locations, but T<<i*louse and Caa.Qrs as well. A Neural Approach for detecting Named Entities in Digitized Historical Newspapers", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Bircher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bircher, S. (2019). Toulouse and Cahors refer to loca- tions, but T<<i*louse and Caa.Qrs as well. A Neural Approach for detecting Named Entities in Digitized His- torical Newspapers. Master thesis, Zurich University.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Latent dirichlet allocation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of machine Learning research", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Blei, D. M., Ng, A. Y., and Jordan, M. I. (2003). La- tent dirichlet allocation. Journal of machine Learning research, 3(Jan):993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bojanowski, P., Grave, E., Joulin, A., and Mikolov, T. (2016). Enriching word vectors with subword informa- tion. CoRR, abs/1607.04606.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A large-scale comparison of historical text normalization systems", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bollmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3885--3898", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bollmann, M. (2019). A large-scale comparison of histor- ical text normalization systems. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Papers), pages 3885-3898. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Impact of OCR Errors on the Use of Digital Libraries: Towards a Better Access to Information", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Chiron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Doucet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Coustaty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Visani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-P", |
|
"middle": [], |
|
"last": "Moreux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 17th ACM/IEEE Joint Conference on Digital Libraries, JCDL '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "249--252", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chiron, G., Doucet, A., Coustaty, M., Visani, M., and Moreux, J.-P. (2017). Impact of OCR Errors on the Use of Digital Libraries: Towards a Better Access to Informa- tion. In Proceedings of the 17th ACM/IEEE Joint Con- ference on Digital Libraries, JCDL '17, pages 249-252, Piscataway, NJ, USA. IEEE Press.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "PRImA, DMAS2019, Competition on Digitised Magazine Article Segmentation", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Clausner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Antonacopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pletschacher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Wilms", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Claeyssens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clausner, C., Antonacopoulos, A., Pletschacher, S., Wilms, L., and Claeyssens, S. (2019). PRImA, DMAS2019, Competition on Digitised Magazine Article Segmenta- tion (ICDAR 2019).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Tree-structured named entity recognition on OCR data: Analysis, processing and results", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Dinarelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rosset", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1266--1272", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dinarelli, M. and Rosset, S. (2012). Tree-structured named entity recognition on OCR data: Analysis, processing and results. In LREC, pages 1266-1272.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The VIA annotation software for images, audio and video", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Dutta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 27th ACM International Conference on Multimedia, MM '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dutta, A. and Zisserman, A. (2019). The VIA annotation software for images, audio and video. In Proceedings of the 27th ACM International Conference on Multimedia, MM '19, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Diachronic Evaluation of NER Systems on Old Newspapers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Colavizza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Rochat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 13th Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "97--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehrmann, M., Colavizza, G., Rochat, Y., and Kaplan, F. (2016). Diachronic Evaluation of NER Systems on Old Newspapers. In Proceedings of the 13th Conference on Natural Language Processing (KONVENS 2016)), pages 97-107. Bochumer Linguistische Arbeitsberichte.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Introducing the CLEF 2020 HIPE Shared Task: Named Entity Recognition and Linking on Historical Newspapers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Romanello", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Clematide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Bircher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "European Conference on Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehrmann, M., Romanello, M., Clematide, S., and Bircher, S. (2020). (submitted) Introducing the CLEF 2020 HIPE Shared Task: Named Entity Recognition and Linking on Historical Newspapers. In European Conference on In- formation Retrieval, Lisbon, Portugal, April.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Using and evaluating TRACER for an Index fontium computatus of the Summa contra Gentiles of Thomas Aquinas", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Franzini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Moritz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Marco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Passarotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cuore", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Fifth Italian Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franzini, G., Moritz, M., Marco, B., Passarotti, M., and Cuore, S. (2018). Using and evaluating TRACER for an Index fontium computatus of the Summa contra Gen- tiles of Thomas Aquinas. In Elena Cabrio, et al., editors, Proceedings of the Fifth Italian Conference on Compu- tational Linguistics (CLiC-it 2018).", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Extended named entities annotation on ocred documents: From corpus constitution to evaluation campaign", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Galibert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rosset", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Grouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Quintard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Galibert, O., Rosset, S., Grouin, C., Zweigenbaum, P., and Quintard, L. (2012). Extended named entities annota- tion on ocred documents: From corpus constitution to evaluation campaign. In Nicoletta Calzolari (Conference Chair), et al., editors, Proceedings of the Eight Interna- tional Conference on Language Resources and Evalua- tion (LREC'12), Istanbul, Turkey, may. European Lan- guage Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Diachronic word embeddings reveal statistical laws of semantic change", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Hamilton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Leskovec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1489--1501", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamilton, L. W., Leskovec, J., and Jurafsky, D. (2016). Diachronic word embeddings reveal statistical laws of semantic change. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguis- tics (Volume 1: Long Papers), pages 1489-1501. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Finite-state canonicalization techniques for historical German. doctoral thesis, Universit\u00e4t Potsdam", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Jurish", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "517--55789", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jurish, B. (2012). Finite-state canonicalization techniques for historical German. doctoral thesis, Universit\u00e4t Pots- dam. https://nbn-resolving.org/urn:nbn:de:kobv:517- opus-55789.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Big Data of the Past", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Di Lenardo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaplan, F. and di Lenardo, I. (2017). Big Data of the Past. Frontiers in Digital Humanities, 4.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Mining the twentieth century's history from the time magazine corpus", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kestemont", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Karsdorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "D\u00fcring", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kestemont, M., Karsdorp, F., and D\u00fcring, M. (2014). Min- ing the twentieth century's history from the time maga- zine corpus. EACL 2014, page 62.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The inception platform: Machineassisted and knowledge-oriented interactive annotation", |
|
"authors": [ |
|
{ |
|
"first": "J.-C", |
|
"middle": [], |
|
"last": "Klie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bugert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Boullosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klie, J.-C., Bugert, M., Boullosa, B., de Castilho, R. E., and Gurevych, I. (2018). The inception platform: Machine- assisted and knowledge-oriented interactive annotation. In Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations, pages 5-9.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Content analysis of 150 years of british periodicals", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Lansdall-Welfare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sudhahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cristianini", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the National Academy of Sciences", |
|
"volume": "114", |
|
"issue": "4", |
|
"pages": "457--465", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lansdall-Welfare, T., Sudhahar, S., Thompson, J., Lewis, J., , and Cristianini, N. (2017). Content analysis of 150 years of british periodicals. Proceedings of the National Academy of Sciences, 114(4):E457-E465.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Microsoft coco: Common objects in context", |
|
"authors": [ |
|
{ |
|
"first": "T.-Y", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "740--755", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., and Zitnick, C. L. (2014). Microsoft coco: Common objects in context. In Eu- ropean conference on computer vision, pages 740-755. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Impact of ocr quality on named entity linking", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Linhares Pontes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hamdi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Sidere", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Doucet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Digital Libraries at the Crossroads of Digital Information for the Future", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "102--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linhares Pontes, E., Hamdi, A., Sidere, N., and Doucet, A. (2019a). Impact of ocr quality on named entity linking. In Digital Libraries at the Crossroads of Digital Infor- mation for the Future, pages 102-115. Springer LNCS, October.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Impact of ocr quality on named entity linking", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Linhares Pontes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hamdi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Sidere", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Doucet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Digital Libraries at the Crossroads of Digital Information for the Future", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "102--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linhares Pontes, E., Hamdi, A., Sidere, N., and Doucet, A. (2019b). Impact of ocr quality on named entity linking. In Adam Jatowt, et al., editors, Digital Libraries at the Crossroads of Digital Information for the Future, pages 102-115, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Innovative Approaches of Historical Newspapers: Data Mining, Data Visualization, Semantic Enrichment", |
|
"authors": [ |
|
{ |
|
"first": "J.-P", |
|
"middle": [], |
|
"last": "Moreux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of IFLA WLIC 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moreux, J.-P. (2016). Innovative Approaches of Histori- cal Newspapers: Data Mining, Data Visualization, Se- mantic Enrichment. In Proceedings of IFLA WLIC 2016, page 17, Columbus, OH.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Making Europe's Historical Newspapers Searchable", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Neudecker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Antonacopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "2016 12th IAPR Workshop on Document Analysis Systems (DAS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "405--410", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neudecker, C. and Antonacopoulos, A. (2016). Making Europe's Historical Newspapers Searchable. In 2016 12th IAPR Workshop on Document Analysis Systems (DAS), pages 405-410, Santorini, Greece, April. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "An open corpus for named entity recognition in historic newspapers", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
";" |
|
], |
|
"last": "Neudecker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neudecker, C. (2016). An open corpus for named entity recognition in historic newspapers. In Nicoletta Calzo- lari (Conference Chair), et al., editors, Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), Paris, France, may. Euro- pean Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Misspelling oblivious word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Piktus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Edizel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ferreira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Silvestri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3226--3234", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piktus, A., Edizel, N. B., Bojanowski, P., Grave, E., Fer- reira, R., and Silvestri, F. (2019). Misspelling oblivi- ous word embeddings. In Proceedings of the 2019 Con- ference of the North American Chapter of the Associa- tion for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3226-3234, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Natural language processing for historical texts", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Piotrowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Synthesis Lectures on Human Language Technologies", |
|
"volume": "5", |
|
"issue": "2", |
|
"pages": "1--157", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotrowski, M. (2012). Natural language processing for historical texts. Synthesis Lectures on Human Language Technologies, 5(2):1-157.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "What to do about non-standard (or noncanonical) language in NLP", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Plank", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 13th Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Plank, B. (2016). What to do about non-standard (or non- canonical) language in NLP. In Proceedings of the 13th Conference on Natural Language Processing (KON- VENS 2016)). Bochumer Linguistische Arbeitsberichte.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "The past, present and future of digital scholarship with newspaper collections", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ridge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Colavizza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Brake", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-P", |
|
"middle": [], |
|
"last": "Moreux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prescott", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ridge, M., Colavizza, G., Brake, L., Ehrmann, M., Moreux, J.-P., and Prescott, A. (2019). The past, present and fu- ture of digital scholarship with newspaper collections. page 9. Multi-paper panel presented at the 2019 Digital Humanities Conference, Utrecht, July 2019.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "German Word Embeddings for ShiCo based on historic newspapers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Riedl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Riedl, M. (2019). German Word Embeddings for ShiCo based on historic newspapers, June.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "ICDAR 2019 Competition on Post-OCR Text Correction", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Rigaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Doucet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Coustaty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-P", |
|
"middle": [], |
|
"last": "Moreux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "15th International Conference on Document Analysis and Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rigaud, C., Doucet, A., Coustaty, M., and Moreux, J.- P. (2019). ICDAR 2019 Competition on Post-OCR Text Correction. In 15th International Conference on Document Analysis and Recognition, Sydney, Australia, September.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Structured Named Entities in two distinct press corpora: Contemporary Broadcast News and Old Newspapers", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rosset", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cyril", |
|
"middle": [], |
|
"last": "Grouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Karen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Galibert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Olivier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juliette", |
|
"middle": [], |
|
"last": "Kahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "6th Linguistics Annotation Workshop (The LAW VI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rosset, S., Grouin, Cyril, Fort, Karen, Galibert, Olivier, Kahn, Juliette, and Zweigenbaum, Pierre. (2012). Struc- tured Named Entities in two distinct press corpora: Con- temporary Broadcast News and Old Newspapers. In 6th Linguistics Annotation Workshop (The LAW VI), pages 40-48, Jeju, South Korea, July.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "The long-term reuse of text in the Finnish press", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Salmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rantala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vesanto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Ginter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "CEUR Workshop Proceedings", |
|
"volume": "2364", |
|
"issue": "", |
|
"pages": "394--544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Salmi, H., Rantala, H., Vesanto, A., and Ginter, F. (2019). The long-term reuse of text in the Finnish press, 1771-1920. CEUR Workshop Proceedings, 2364:394- 544.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Quantifying the effects of text duplication on semantic models", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Schofield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Mimno", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2737--2747", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Schofield, A., Thompson, L., and Mimno, D. (2017). Quantifying the effects of text duplication on semantic models. In Proceedings of the 2017 Conference on Em- pirical Methods in Natural Language Processing, pages 2737-2747, Copenhagen, Denmark, September. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "A Research Agenda for Historical and Multilingual Optical Character Recognition", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Cordell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Smith, D. A. and Cordell, R. (2018). A Research Agenda for Historical and Multilingual Optical Character Recog- nition. https://ocr.northeastern.edu/.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Computational Methods for Uncovering Reprinted Texts in Antebellum Newspapers", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Cordell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mullen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Smith, D. A., Cordell, R., and Mullen, A. (2015). Com- putational Methods for Uncovering Reprinted Texts in Antebellum Newspapers. American Literary History, 27(3):E1-E15, sep.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Natural Language Processing for Cultural Heritage Domains", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Sporleder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Language and Linguistics Compass", |
|
"volume": "4", |
|
"issue": "9", |
|
"pages": "750--768", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sporleder, C. (2010). Natural Language Processing for Cultural Heritage Domains. Language and Linguistics Compass, 4(9):750-768.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Improving OCR of Black Letter in Historical Newspapers: The Unreasonable Effectiveness of HTR Models on Low-Resolution Images", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Str\u00f6bel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Clematide", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Digital Humanities", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Str\u00f6bel, P. B. and Clematide, S. (2019). Improving OCR of Black Letter in Historical Newspapers: The Unreason- able Effectiveness of HTR Models on Low-Resolution Images. In Proceedings of the Digital Humanities 2019, (DH2019). CLARIAH.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "The Rise of Digitization", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Terras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Digitisation Perspectives", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terras, M. M. (2011). The Rise of Digitization. In Ruth Rikowski, editor, Digitisation Perspectives, pages 3-20. SensePublishers, Rotterdam.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Entity Extraction is a Boring Solved Problem: Or is It?", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Vilain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Lubar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Human Language Technologies 2007: The Conference of the North American Chapter of the Association for Computational Linguistics; Companion Volume, Short Papers, NAACL-Short '07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "181--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vilain, M., Su, J., and Lubar, S. (2007). Entity Extraction is a Boring Solved Problem: Or is It? In Human Lan- guage Technologies 2007: The Conference of the North American Chapter of the Association for Computational Linguistics; Companion Volume, Short Papers, NAACL- Short '07, pages 181-184. Association for Computa- tional Linguistics. event-place: Rochester, New York.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "HTR Engine Based on NNs P2 Building Deep Architectures with TensorFlow", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Weidemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Gr\u00fcning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Labahn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weidemann, M., Michael, J., Gr\u00fcning, T., and Labahn, R. (2018). HTR Engine Based on NNs P2 Building Deep Architectures with TensorFlow. Technical report.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Using word embeddings to examine gender bias in Dutch newspapers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wevers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 1st International Workshop on Computational Approaches to Historical Language Change", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "92--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wevers, M. (2019). Using word embeddings to exam- ine gender bias in Dutch newspapers, 1950-1990. In Proceedings of the 1st International Workshop on Com- putational Approaches to Historical Language Change, pages 92-97, Florence, Italy, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Topic modeling on historical newspapers", |
|
"authors": [ |
|
{ |
|
"first": "T.-I", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Torget", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 5th ACL-HLT Workshop on Language Technology for Cultural Heritage, Social Sciences, and Humanities", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "96--104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang, T.-I., Torget, A., and Mihalcea, R. (2011a). Topic modeling on historical newspapers. In Proceedings of the 5th ACL-HLT Workshop on Language Technology for Cultural Heritage, Social Sciences, and Humanities, pages 96-104, Portland, OR, USA, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Topic modeling on historical newspapers", |
|
"authors": [ |
|
{ |
|
"first": "T.-I", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Torget", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 5th ACL-HLT Workshop on Language Technology for Cultural Heritage, Social Sciences, and Humanities (LaTecH)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "96--104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang, T.-I., Torget, A. J., and Mihalcea, R. (2011b). Topic modeling on historical newspapers. In Proceedings of the 5th ACL-HLT Workshop on Language Technology for Cultural Heritage, Social Sciences, and Humanities (LaTecH), pages 96-104.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td># issues</td><td>79,746</td><td>337,163</td><td>187,860</td><td>604,769</td></tr><tr><td># pages</td><td>399,363</td><td>4,132,821</td><td>399,363</td><td>4,931,547</td></tr><tr><td># tokens</td><td>572,030,104</td><td>9,374,592,395</td><td colspan=\"2\">2,641,896,310 12,588,518,809</td></tr><tr><td># content items</td><td>1,461,700</td><td>38,948,561</td><td>4,269,189</td><td>44,679,450</td></tr><tr><td># images</td><td>32,964</td><td>3,030,126</td><td>417,732</td><td>3,480,822</td></tr></table>", |
|
"text": "Number of items Unrestricted Restricted (with download) Restricted (w/o download) Total", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Global statistics on the impresso corpora.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |