|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:09:31.896121Z" |
|
}, |
|
"title": "Automatic Matching of Paintings and Descriptions in Art-Historic Archives using Multimodal Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bartz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Hasso Plattner Institute University of Potsdam", |
|
"location": { |
|
"postCode": "14482", |
|
"settlement": "Potsdam", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Nitisha", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Hasso Plattner Institute University of Potsdam", |
|
"location": { |
|
"postCode": "14482", |
|
"settlement": "Potsdam", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Hasso Plattner Institute University of Potsdam", |
|
"location": { |
|
"postCode": "14482", |
|
"settlement": "Potsdam", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Cultural heritage data plays a pivotal role in the understanding of human history and culture. A wealth of information is buried in art-historic archives which can be extracted via digitization and analysis. This information can facilitate search and browsing, help art historians to track the provenance of artworks and enable wider semantic text exploration for digital cultural resources. However, this information is contained in images of artworks, as well as textual descriptions or annotations accompanied with the images. During the digitization of such resources, the valuable associations between the images and texts are frequently lost. In this project description, we propose an approach to retrieve the associations between images and texts for artworks from art-historic archives. To this end, we use machine learning to generate text descriptions for the extracted images on the one hand, and to detect descriptive phrases and titles of images from the text on the other hand. Finally, we use embeddings to align both, the descriptions and the images.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Cultural heritage data plays a pivotal role in the understanding of human history and culture. A wealth of information is buried in art-historic archives which can be extracted via digitization and analysis. This information can facilitate search and browsing, help art historians to track the provenance of artworks and enable wider semantic text exploration for digital cultural resources. However, this information is contained in images of artworks, as well as textual descriptions or annotations accompanied with the images. During the digitization of such resources, the valuable associations between the images and texts are frequently lost. In this project description, we propose an approach to retrieve the associations between images and texts for artworks from art-historic archives. To this end, we use machine learning to generate text descriptions for the extracted images on the one hand, and to detect descriptive phrases and titles of images from the text on the other hand. Finally, we use embeddings to align both, the descriptions and the images.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In the age of big data, there is increasing attention on the digitization of cultural heritage collections and their availability as digital libraries to aid wider access and exploration of this previously opaque data. A number of museums, libraries, and other cultural institutions (e.g. Europeana, Getty Research Institute, Wildenstein Plattner Institute, and the Rijks Museum 1 ) have invested significant efforts to digitize their collections consisting of old art books, catalogues for art exhibitions and auctions, etc. Initiatives, such as OpenGLAM 2 , promote collaboration among these cultural institutions for research on shared resources. The volume and heterogeneity of these digitized collections necessitates automated analysis of this data. Modern data science tools can assist in deriving insights from the images, as well as from the textual content of these collections. In addition to the actual content, cultural heritage datasets, such as art-historic corpora, are often enriched with meta-data that can provide useful information and context for automatic tools. One example of meta-data is the associations between the artwork images and the texts contained in catalogues and books. Art-historic corpora contain textual information in the form of captions of images (often depicting the titles of artworks), as well as the description of artworks including their creator, year, and, in case of auction catalogues, price information. During the digitization step, images from physical pages are typically scanned and the text is retrieved by means of Optical Character Recognition (OCR) technology. Although these techniques have been fairly improved to minimize the error rate, the information about the association between the images of artworks and their corresponding text excerpts is not retained. This is especially true when multiple images and text excerpts are present on a single page. The availabil-ity of such associations between images and texts can help with multimodal semantic analysis of artworks, wherein important descriptive features can be identified from the images, while the corresponding text might provide additional background information about the style and context of the artwork and the artist. In some cases, the text can also provide further evidence and confirmation for the features inferred from the images, and vice versa. For example, consider a case where image analysis correctly ascertains that a particular painting depicts a house with mountains in the background, and the associated text description not only contains terms such as mountains and house but also mentions that this painting is in landscape orientation, then the painting can be categorized and tagged as such. This meta-data derived from the associations between images and texts could be particularly useful in search and exploration of lost artworks, where only a few indicators about the sought-out artworks are known beforehand. An art historian would greatly benefit from image-text associations while retrieving images of artworks from a database by searching on the basis of a few keywords (style, motif, orientation and other features) that can be found in the corresponding description texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The matching of images with texts can be done at various levels of granularity based on the size of the data under consideration. Each level poses different challenges and demands unique techniques to achieve desired results. For instance, multiple images on a single catalogue page have a higher likelihood to belong to a common theme or topic. Matching at this level requires techniques to differentiate between similar images, as well as to extract the most distinctive keyphrases from the text descriptions. When the task is scaled to a large corpus of multiple types of catalogue pages, the matching will need to be performed between a large number of possible pairs. To narrow down the search space, the images could be classified on the basis of their art styles by identifying and leveraging common themes in the corpus. This would be followed by matching on basis of differentiating characteristics as before. In this work, we propose a generic framework to retrieve the associations between images of artworks and texts from arthistoric archives by means of automated approaches. Due to the multimodal nature of this task, our solution is comprised of a combination of techniques from computer vision, as well as natural language processing. While image captioning techniques are employed to identify and tag the images of artworks, Named Entity Recognition (NER) and keyphrase identification techniques are used for the extraction of descriptive terms from the text excerpts. Lastly, to establish the associations between the images and texts, we perform the representation and alignment of the description texts obtained from above techniques via embeddings. This paper describes an ongoing project on multimodal analysis of cultural heritage datasets. The project is a part of a larger collaboration 3 with the Wildenstein Plattner Institute 4 that was founded to promote scholarly research on cultural heritage collections. The contributions of this paper are : (1) Introduce the novel task of matching artwork images to their text descriptions in art-historic corpora. (2) Propose a framework to extract descriptive features from images and texts of artworks and perform their semantic alignment. (3) Identify evaluation methods for measuring the performance of the framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The multimodal nature of our proposed framework is rooted in two different fields. The first field is text analytics for automatic understanding of the semantics of extracted texts. The second field is image analysis for the extraction of the semantics of images. In this section, we present and outline the relation of previous work that is related to the analysis of cultural heritage data for each of the two fields.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Analysis of cultural heritage data has been of active research interest for the digital humanities where various works have performed use case driven text analysis of digitized art corpora. For example, there is existing work on performing event extraction for historical events (Segers et al., 2011) and finding parallel passages from cultural heritage archives (Harris et al., 2018) . There have been several attempts to create knowledge repositories in the form of knowledge graphs and linked open data collections from art data (Hyv\u00f6nen and Rantala, 2019; Van Hooland and Verborgh, 2014; Dijkshoorn et al., 2018; De Boer et al., 2012) . While these works lay emphasis on extracting facts and useful information from the text, they do not necessarily identify the most representative terms and keyphrases from the text. NER is a related task which has been performed for the cultural heritage domain in several papers (Van Hooland et al., 2013; Ehrmann et al., 2016; Jain and Krestel, 2019) . The challenges of this task in the context of noisy OCRed datasets have been discussed previously (Rodriquez et al., 2012) and (Kettunen and Ruokolainen, 2017 ). While we", |
|
"cite_spans": [ |
|
{ |
|
"start": 279, |
|
"end": 300, |
|
"text": "(Segers et al., 2011)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 384, |
|
"text": "(Harris et al., 2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 532, |
|
"end": 559, |
|
"text": "(Hyv\u00f6nen and Rantala, 2019;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 591, |
|
"text": "Van Hooland and Verborgh, 2014;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 592, |
|
"end": 616, |
|
"text": "Dijkshoorn et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 638, |
|
"text": "De Boer et al., 2012)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 921, |
|
"end": 947, |
|
"text": "(Van Hooland et al., 2013;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 948, |
|
"end": 969, |
|
"text": "Ehrmann et al., 2016;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 970, |
|
"end": 993, |
|
"text": "Jain and Krestel, 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1094, |
|
"end": 1118, |
|
"text": "(Rodriquez et al., 2012)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1123, |
|
"end": 1154, |
|
"text": "(Kettunen and Ruokolainen, 2017", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Analytics", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "Automatic image analysis in the domain of art-historical research has been studied in several earlier research works (Huang et al., 2018; Elgammal et al., 2018; Yang et al., 2018; Thomas and Kovashka, 2019) . One of the greatest problems of automatic image analysis in the art domain is the availability of suitable training data (Huang et al., 2018; Elgammal et al., 2018; Thomas and Kovashka, 2019) . Methods in related work rely on fine-tuning image classification models, pre-trained on photographs, to overcome the problem of the non-availability of training data. Using such pre-trained models often leads to the problem of domain-adaptation, which arises because available models are pre-trained on photos and not on images of artworks. Thomas and Kovashka (Thomas and Kovashka, 2019) propose to use methods of neural style transfer (Gatys et al., 2015) to generate a sufficient amount of training data, based on photographs and a set of artworks that are used as baseline style images. All in all, related methods mainly concentrate on the problem of image classification (Thomas and Kovashka, 2019) , style, genre, and artist classification (Huang et al., 2018; Elgammal et al., 2018; Lecoutre et al., 2017) , or time period and type classification (Yang et al., 2018) . So far, there has been no work on performing automatic image captioning for artworks, which is one of the focus points of our work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 137, |
|
"text": "(Huang et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 160, |
|
"text": "Elgammal et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 179, |
|
"text": "Yang et al., 2018;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 206, |
|
"text": "Thomas and Kovashka, 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 350, |
|
"text": "(Huang et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 373, |
|
"text": "Elgammal et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 400, |
|
"text": "Thomas and Kovashka, 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 764, |
|
"end": 791, |
|
"text": "(Thomas and Kovashka, 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 840, |
|
"end": 860, |
|
"text": "(Gatys et al., 2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1080, |
|
"end": 1107, |
|
"text": "(Thomas and Kovashka, 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1150, |
|
"end": 1170, |
|
"text": "(Huang et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1171, |
|
"end": 1193, |
|
"text": "Elgammal et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1194, |
|
"end": 1216, |
|
"text": "Lecoutre et al., 2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1258, |
|
"end": 1277, |
|
"text": "(Yang et al., 2018)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Image Analysis", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "A natural idea is to embed the features extracted from both modalities into a common semantic subspace (Kiros et al., 2014; Liu et al., 2019) , where a model is learned, that embeds text and image features in a shared high dimensional embedding space. The goal of the embedding is to bring the concepts, obtained from text and image analysis that have the same meaning, as close to each other as possible. In our work, we want to follow this basic embedding approach and use the combined information from text analysis models and image analysis models for the matching of an image to its corresponding text in art-historic corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 123, |
|
"text": "(Kiros et al., 2014;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 141, |
|
"text": "Liu et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination of Text Analysis and Image Analysis", |
|
"sec_num": "2.3." |
|
}, |
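For reference, the kind of bidirectional ranking objective used in this line of work (e.g., Kiros et al., 2014) can be sketched as follows; here $x$ and $v$ are the embeddings of a matching text and image pair, primed symbols range over mismatched samples, $s$ is cosine similarity, and $\alpha$ is a margin (the exact formulation varies between papers):

$$
\mathcal{L} = \sum_{(x,v)} \left[ \sum_{v'} \max\!\big(0,\ \alpha - s(x,v) + s(x,v')\big) + \sum_{x'} \max\!\big(0,\ \alpha - s(x,v) + s(x',v)\big) \right]
$$

Minimizing $\mathcal{L}$ pushes each matching text-image pair to score at least $\alpha$ higher than any mismatched pair, which is what places same-meaning concepts close together in the shared space.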
|
{ |
|
"text": "In this section, we discuss our proposed framework for performing the matching of artwork images to associated texts and describe the different components in detail. We envision to create an automated pipeline that takes the raw scan of a page of any catalogue or book as input and performs several operations on it: (1) Text is localized and recognized using off-the-shelf OCR software. In the remainder of this section, we will explain the challenges and possible approaches towards a solution for each of the sub-tasks, namely text analysis, image analysis and semantic alignment of text and images.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Matching Paintings and Descriptions", |
|
"sec_num": "3." |
|
}, |
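A minimal sketch of this envisioned pipeline follows; every component function is a hypothetical stub standing in for the step described in the corresponding subsection, not an existing implementation:

```python
# Hypothetical skeleton of the proposed pipeline; each step is a stub.
from dataclasses import dataclass, field

@dataclass
class PageAnalysis:
    texts: list = field(default_factory=list)    # recognized text excerpts
    images: list = field(default_factory=list)   # extracted artwork images
    matches: list = field(default_factory=list)  # (image_idx, text_idx) pairs

def localize_and_ocr(page_scan):
    return []  # (1) off-the-shelf OCR: localize and recognize text (Sec. 3.1.)

def extract_artwork_images(page_scan):
    return []  # (2) crop artwork images from the scanned page (Sec. 3.2.)

def align(images, texts):
    return []  # (3) semantic alignment via shared embeddings (Sec. 3.3.)

def process_page(page_scan) -> PageAnalysis:
    texts = localize_and_ocr(page_scan)
    images = extract_artwork_images(page_scan)
    return PageAnalysis(texts, images, align(images, texts))
```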
|
{ |
|
"text": "An intuitive way to match an image in an art catalogue with its description is via the title of the artwork. Assuming that the description of any given artwork will include the title, a human would be able to identify the relevant image on the page by matching the title with the caption of the image. Since any caption text associated with the images (including their titles) is usually not available after the digitization process, the matching for digitized datasets has to be performed solely on the basis of the features or tags that are extracted from the images. However, matching on the basis of titles alone is still not a viable approach due to several reasons. Firstly, as discussed in (Jain and Krestel, 2019) , the identification of titles of artworks in textual descriptions is itself a non-trivial task and shows sub-optimal performance with existing NER tools. Secondly, even for a scenario where the titles are correctly identified from the text descriptions, they are not always sufficiently representative of the artworks. An example would be modern art paintings where the titles may not be descriptive of the motif in the painting and thus not helpful for matching. Titles are also not useful in the case of old portrait paintings where it is difficult to uniquely identify an image from the name of the depicted person (which is also the title in most cases). This illustrates that titles of artworks might not necessarily contain the required semantic information for the matching of texts with artwork images. As our approach relies on semantic alignment for the matching, it is important to focus on identifying the most salient parts of the description of paintings in the text. To this end, there are two methods we would like to investigate. The first is to look at keyphrase extraction, which identifies and extracts the most representative phrases from a document. Supervised approaches for keyphrase identification are popular (Jiang et al., 2009) , however they need annotated training data which is tricky to generate for art datasets. Owing to the subjective nature of the domain, a gold standard training dataset is difficult to obtain due to lack of agreement by non-expert annotators. Therefore, in this work, we would like to turn to unsupervised keyphrase extraction techniques (Hasan and Ng, 2010; Mihalcea and Tarau, 2004) where the task is performed with help of semantic relatedness. Further, to fine-tune this task for the art domain, we want to pursue domain-specific keyphrase extraction techniques (Wu et al., 2005; Hulth et al., 2001 ). The second method is to directly embed the text in the semantic space. For this approach, we would need to perform the segmentation of the text excerpts, followed by identification of the relevant segments that contain descriptions of the artwork images. This is important particularly for art books where the texts include discussions not only about artworks, but also about the artists, art styles, etc.", |
|
"cite_spans": [ |
|
{ |
|
"start": 697, |
|
"end": 721, |
|
"text": "(Jain and Krestel, 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1958, |
|
"end": 1978, |
|
"text": "(Jiang et al., 2009)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 2317, |
|
"end": 2337, |
|
"text": "(Hasan and Ng, 2010;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 2338, |
|
"end": 2363, |
|
"text": "Mihalcea and Tarau, 2004)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 2545, |
|
"end": 2562, |
|
"text": "(Wu et al., 2005;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 2563, |
|
"end": 2581, |
|
"text": "Hulth et al., 2001", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Analysis", |
|
"sec_num": "3.1." |
|
}, |
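As an illustration of the unsupervised direction, a minimal TextRank-style keyword ranker (in the spirit of Mihalcea and Tarau, 2004) scores words by PageRank over a co-occurrence graph; real systems additionally filter by part of speech, remove stopwords, and merge adjacent keywords into phrases:

```python
# Minimal sketch of TextRank-style unsupervised keyword ranking.
import networkx as nx

def textrank_keywords(tokens, window=4, top_k=5):
    # Build an undirected co-occurrence graph over a sliding window.
    graph = nx.Graph()
    for i, word in enumerate(tokens):
        for other in tokens[i + 1 : i + window]:
            if other != word:
                graph.add_edge(word, other)
    # PageRank scores serve as word importance.
    scores = nx.pagerank(graph)
    return sorted(scores, key=scores.get, reverse=True)[:top_k]

tokens = ("a house with mountains in the background painted"
          " in landscape orientation").split()
print(textrank_keywords(tokens))  # stopword filtering omitted for brevity
```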
|
{ |
|
"text": "In order to analyze the semantic content of digitized images, we plan to use modern computer vision methods based on deep learning. Computer vision tasks which are very close to the tasks that we want to perform, are automatic image classification (Krizhevsky et al., 2012) , image captioning (Xu et al., 2015) , i.e. the generation of textual desciptions of depicted content, and object detection (Ren et al., 2015) . All of these methods extract semantic information from images and have been shown to work very well on photographs. The most challenging problem in working with images of artworks is that photographs have a very different underlying data distribution than images of artworks, especially paintings. This makes it necessary to train machine learning models directly on images of artworks. However, large-scale annotated training data sets with artworks are not available. There exist some datasets that contain artworks and annotations (e.g. art style), such as the WiKiArt database 5 , or the OmniArt dataset (Strezoski and Worring, 2018) . However, none of these datasets can be used for image classification or automatic image captioning, since they lack the annotations required for these tasks. We can, however, make use of photographies and their annotations, which are available in large-scale datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 273, |
|
"text": "(Krizhevsky et al., 2012)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 310, |
|
"text": "(Xu et al., 2015)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 416, |
|
"text": "(Ren et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1027, |
|
"end": 1056, |
|
"text": "(Strezoski and Worring, 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Image Analysis", |
|
"sec_num": "3.2." |
|
}, |
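A sketch of the fine-tuning workaround used in related work, assuming PyTorch with torchvision (version 0.13 or later); the class count and hyperparameters are illustrative only:

```python
import torch
import torch.nn as nn
from torchvision import models

NUM_ART_CLASSES = 27  # placeholder: e.g. number of art styles in the target data

# Start from a classifier pre-trained on photographs (ImageNet).
model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V2)
model.fc = nn.Linear(model.fc.in_features, NUM_ART_CLASSES)  # new artwork head

# A common recipe: freeze the photo-trained backbone and train only the head
# first, then unfreeze everything for fine-tuning at a low learning rate.
for param in model.parameters():
    param.requires_grad = False
for param in model.fc.parameters():
    param.requires_grad = True

optimizer = torch.optim.AdamW(model.fc.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()
```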
|
{ |
|
"text": "To this end, we want to follow (Thomas and Kovashka, 2019) and use methods of neural style transfer (Gatys et al., 2015; Yao et al., 2019) to create new large-scale art centered datasets for image classification, image captioning, and object recognition on artworks. For image classification, we want to use the ImageNet dataset (Deng et al., 2009) and create a new ArtImageNet dataset that we will use as a base model in a subsequent step to train an image classification model. For image captioning and object detection, we want to use the COCO dataset (Lin et al., 2014) and fine-tune the image classification model that we created earlier for each of these tasks. For creating the artistic versions of the photographs from each dataset, we want use the WikiArt or the OmniArt dataset, as artistic style images.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 58, |
|
"text": "(Thomas and Kovashka, 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 100, |
|
"end": 120, |
|
"text": "(Gatys et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 138, |
|
"text": "Yao et al., 2019)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 348, |
|
"text": "(Deng et al., 2009)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 573, |
|
"text": "(Lin et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Image Analysis", |
|
"sec_num": "3.2." |
|
}, |
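A minimal sketch of this data generation step; `stylize` is a placeholder for any neural style transfer method (e.g. in the spirit of Gatys et al., 2015), and the key point is that the original annotations carry over to the stylized images:

```python
import random

def stylize(content_image, style_image):
    # Placeholder for a neural style transfer model; it should return the
    # content image re-rendered in the style of `style_image`.
    return content_image

def build_art_dataset(photo_dataset, style_images, seed=0):
    # photo_dataset: iterable of (image, annotation) pairs, e.g. from
    # ImageNet or COCO; style_images: artworks from WikiArt or OmniArt.
    rng = random.Random(seed)
    for image, annotation in photo_dataset:
        style = rng.choice(style_images)
        # The annotation (class label, caption, boxes) is kept unchanged,
        # since style transfer preserves the depicted content.
        yield stylize(image, style), annotation
```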
|
{ |
|
"text": "After performing the extraction of meaningful features from textual data and image data in parallel, the next step is to find ways of aligning the extracted information and match an image to its accompanying text. For this, we want to embed the output from the text analysis and image analysis component in a common semantic space (i.e via word embeddings), where we can represent similar concepts close to each other and thereby find text and image pairs that might be a good match. Another idea, is to use the feature vectors created by the image analysis methods and train a further model to embed them into the same semantic space as the word embeddings of the relevant texts and phrases. Such an alignment in a common semantic subspace will allow us to perform image retrieval for a given text query and also text retrieval for a given query image.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Alignment", |
|
"sec_num": "3.3." |
|
}, |
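A sketch of this second idea, assuming PyTorch; the dimensions and the linear projection are illustrative assumptions, and in practice the projection would be trained with a ranking loss such as the one shown in Section 2.3.:

```python
import torch
import torch.nn.functional as F

img_dim, txt_dim = 2048, 300                 # e.g. CNN features vs. word embeddings
project = torch.nn.Linear(img_dim, txt_dim)  # learned map into the text space

image_feats = torch.randn(10, img_dim)  # stand-ins for extracted image features
text_embs = torch.randn(10, txt_dim)    # stand-ins for text/keyphrase embeddings

img_in_txt_space = F.normalize(project(image_feats), dim=1)
texts = F.normalize(text_embs, dim=1)
sim = img_in_txt_space @ texts.T        # cosine similarities, images x texts

best_text_per_image = sim.argmax(dim=1)  # text retrieval for a query image
best_image_per_text = sim.argmax(dim=0)  # image retrieval for a query text
```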
|
{ |
|
"text": "In this section, we address the question of the evaluation of the proposed framework. This question can be divided into three parts: 1) How to evaluate the proposed text analysis methods regarding their adjustments to fit the challenges of extracting relevant information from art-historic archives. 2) How to evaluate the proposed image analysis methods in the context of art analysis, since state-of-the-art image analysis methods are mainly trained on photographs, which are quite different from artworks. 3) How to evaluate the framework that performs the alignment of the information from the text and image analysis components to enable matching of images with their textual descriptions. Evaluation of Text Analysis. As discussed in Section 3.1., the availability of annotated datasets for training and evaluation is a major bottleneck for evaluating semantic representations, especially in the art domain. For this, we plan to enlist the help of domain experts for the creation of a smaller gold standard test dataset that will include annotations for the most important textual segments or keyphrases for identifying the corresponding images. The performance of our text analytics approaches can then be measured by comparing the results with the gold standard annotations in terms of precision and recall. Evaluation of Image Analysis. The most important aspect in evaluating the image analysis methods is how well they can be adapted to work on images of art, despite having only a very small amount of annotated real training data available. Though there are datasets available, e.g. provided by Europeana 6 , their annotations do not follow a common scheme which limits their utility for our purpose. As we propose in Section 3.2., we want to use methods of neural style transfer to create a sufficient amount of training data. On the one hand, we want to focus on the plain numerical evaluation of these models, using well known evaluation metrics, like classification accuracy for image classification, precision, recall and f-measure, as well as average precision for object detection, and metrics for image captioning evaluation, e.g. BLEU-score (Papineni et al., 2002) , METEOR (Banerjee and Lavie, 2005) , CIDEr (Vedantam et al., 2015) , ROUGE (Lin, 2004) , SPICE (Anderson et al., 2016) , BERTScore (Zhang et al., 2019) , or Mover-Score (Zhao et al., 2019) . On the other hand, we are interested in evaluating the influence of different base models that are used to create our image captioning for art, or object detection models. Here, we want to compare a standard Im-ageNet model with a model created with our ArtImageNet dataset. We want to use this to examine whether automatic methods can successfully be used to generate novel annotated data, based on already available data. Evaluation of Text and Image Alignment. The task of matching a given text to an image in an art catalogue can be cast as a retrieval task. This retrieval task consists of two aspects. The first aspect is to retrieve an image, given a textual description and the second is to retrieve a textual description, given an input image. We can use standard image retrieval evaluation methods, also used in related work (Kiros et al., 2014; Liu et al., 2019) , such as recall at k (R@K), for the evaluation. Here, we are interested in different values of K based on the granularity of the current search. 
If we only consider a single page with text and several images, we are interested in the recall at K = 1, whereas if we want to retrieve an image to a given text over an entire catalogue, we are interested in the performance at higher values of K.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2163, |
|
"end": 2186, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 2196, |
|
"end": 2222, |
|
"text": "(Banerjee and Lavie, 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 2231, |
|
"end": 2254, |
|
"text": "(Vedantam et al., 2015)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 2263, |
|
"end": 2274, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 2283, |
|
"end": 2306, |
|
"text": "(Anderson et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 2319, |
|
"end": 2339, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 2357, |
|
"end": 2376, |
|
"text": "(Zhao et al., 2019)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 3214, |
|
"end": 3234, |
|
"text": "(Kiros et al., 2014;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 3235, |
|
"end": 3252, |
|
"text": "Liu et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "Since the problem of extracting images and their textual descriptions from art-historic archives has not been studied before, there are no evaluation datasets available. For the evaluation of our method, it will be important to create an evaluation dataset with help from domain experts that includes different levels of granularity, for measuring the performance of this kind of retrieval task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "In this paper, we present the description of a project that deals with the novel task of matching artwork images to their corresponding text descriptions in digitized arthistoric corpora. We provide an overview of the related work and challenges in this domain and describe a possible framework to tackle the problem of image and text alignment. Furthermore, we give an overview of the possible evaluation methods that we want to use for evaluating each component as well as the overall performance of our proposed framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "https://hpi.de/naumann/projects/ web-science/caart.html 4 https://wpi.art/ also require techniques to handle noise in datasets as proposed by these papers, this is not the primary focus of our work. For our text analytics approach, we need to broaden the scope beyond NER to identify the most important phrases from the digitized texts that contain descriptions of the artworks, which has not been addressed by any previous work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.wikiart.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.europeana.eu/en/collections/ topic/190-art", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Acknowledgement. We thank the Wildenstein Plattner Institute for providing access to their art-historic archives.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "SPICE: Semantic Propositional Image Caption Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Fernando", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gould", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the European Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "382--398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anderson, P., Fernando, B., Johnson, M., and Gould, S. (2016). SPICE: Semantic Propositional Image Caption Evaluation. In Proceedings of the European Conference on Computer Vision, pages 382-398.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Banerjee, S. and Lavie, A. (2005). METEOR: An auto- matic metric for MT evaluation with improved correla- tion with human judgments. In Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Mea- sures for Machine Translation and/or Summarization, pages 65-72.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Supporting Linked Data Production for Cultural Heritage Institutes: The Amsterdam Museum Case Study", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "De Boer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wielemaker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Van Gent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Hildebrand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Isaac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Van Ossenbruggen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Schreiber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Extended Semantic Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "733--747", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "De Boer, V., Wielemaker, J., Van Gent, J., Hildebrand, M., Isaac, A., Van Ossenbruggen, J., and Schreiber, G. (2012). Supporting Linked Data Production for Cul- tural Heritage Institutes: The Amsterdam Museum Case Study. In Proceedings of the Extended Semantic Web Conference, pages 733-747.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "ImageNet: A large-scale hierarchical image database", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "248--255", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deng, J., Dong, W., Socher, R., Li, L., Li, K., and Fei- Fei, L. (2009). ImageNet: A large-scale hierarchical im- age database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 248- 255.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The Rijksmuseum Collection as Linked Data. Semantic Web", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Dijkshoorn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Jongma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Aroyo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Van Ossenbruggen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Schreiber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Ter Weele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wielemaker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "221--230", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dijkshoorn, C., Jongma, L., Aroyo, L., Van Ossenbruggen, J., Schreiber, G., ter Weele, W., and Wielemaker, J. (2018). The Rijksmuseum Collection as Linked Data. Semantic Web, 9(2):221-230.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Diachronic Evaluation of NER Systems on Old Newspapers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Colavizza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Rochat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 13th Conference on Natural Language Processing (KONVENS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "97--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehrmann, M., Colavizza, G., Rochat, Y., and Kaplan, F. (2016). Diachronic Evaluation of NER Systems on Old Newspapers. In Proceedings of the 13th Conference on Natural Language Processing (KONVENS), pages 97- 107.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The Shape of Art History in the Eyes of the Machine", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Elgammal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Elhoseiny", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mazzone", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elgammal, A., Liu, B., Kim, D., Elhoseiny, M., and Maz- zone, M. (2018). The Shape of Art History in the Eyes of the Machine. In Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A Neural Algorithm of Artistic Style", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Gatys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Ecker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bethge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.06576" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gatys, L. A., Ecker, A. S., and Bethge, M. (2015). A Neu- ral Algorithm of Artistic Style. arXiv:1508.06576 [cs, q-bio].", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Finding Parallel Passages in Cultural Heritage Archives", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Levene", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Levene", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal on Computing and Cultural Heritage", |
|
"volume": "11", |
|
"issue": "3", |
|
"pages": "1--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harris, M., Levene, M., Zhang, D., and Levene, D. (2018). Finding Parallel Passages in Cultural Heritage Archives. Journal on Computing and Cultural Heritage, 11(3):1- 24.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Conundrums in Unsupervised Keyphrase Extraction: Making Sense of the Stateof-the-art", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Hasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "365--373", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hasan, K. S. and Ng, V. (2010). Conundrums in Unsuper- vised Keyphrase Extraction: Making Sense of the State- of-the-art. In Proceedings of the 23rd International Con- ference on Computational Linguistics, pages 365-373.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Fine-Art Painting Classification via Two-Channel Deep Residual Network", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S.-H", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Multimedia Information Processing -PCM 2017", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang, X., Zhong, S.-h., and Xiao, Z. (2018). Fine-Art Painting Classification via Two-Channel Deep Residual Network. In Advances in Multimedia Information Pro- cessing -PCM 2017, pages 79-88.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Automatic Keyword Extraction using Domain Knowledge", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hulth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Karlgren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Jonsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Bostr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Asker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the International Conference on Intelligent Text Processing and Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "472--482", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hulth, A., Karlgren, J., Jonsson, A., Bostr\u00f6m, H., and Asker, L. (2001). Automatic Keyword Extraction using Domain Knowledge. In Proceedings of the International Conference on Intelligent Text Processing and Computa- tional Linguistics, pages 472-482.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Knowledge-based Relation Discovery in Cultural Heritage Knowledge Graphs", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hyv\u00f6nen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rantala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Digital Humanities in the Nordic Countries", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "230--239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hyv\u00f6nen, E. and Rantala, H. (2019). Knowledge-based Relation Discovery in Cultural Heritage Knowledge Graphs. In Digital Humanities in the Nordic Countries, pages 230-239.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Who is Mona L.? Identifying Mentions of Artworks in Historical Archives", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the International Conference on Theory and Practice of Digital Libraries", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "115--122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jain, N. and Krestel, R. (2019). Who is Mona L.? Iden- tifying Mentions of Artworks in Historical Archives. In Proceedings of the International Conference on Theory and Practice of Digital Libraries, pages 115-122.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A Ranking Approach to Keyphrase Extraction", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 32nd International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "756--757", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiang, X., Hu, Y., and Li, H. (2009). A Ranking Approach to Keyphrase Extraction. In Proceedings of the 32nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 756-757.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Names, Right or Wrong: Named Entities in an OCRed Historical Finnish Newspaper Collection", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kettunen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ruokolainen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2nd International Conference on Digital Access to Textual Cultural Heritage", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "181--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kettunen, K. and Ruokolainen, T. (2017). Names, Right or Wrong: Named Entities in an OCRed Historical Finnish Newspaper Collection. In Proceedings of the 2nd Inter- national Conference on Digital Access to Textual Cul- tural Heritage, pages 181-186.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Unifying Visual-Semantic Embeddings with Multimodal Neural Language Models", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zemel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1411.2539" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kiros, R., Salakhutdinov, R., and Zemel, R. S. (2014). Unifying Visual-Semantic Embeddings with Multimodal Neural Language Models. arXiv:1411.2539 [cs].", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "ImageNet Classification with Deep Convolutional Neural Networks", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "25", |
|
"issue": "", |
|
"pages": "1097--1105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "ImageNet Classification with Deep Convolutional Neu- ral Networks. In Advances in Neural Information Pro- cessing Systems 25, pages 1097-1105.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Recognizing Art Style Automatically in Painting with Deep Learning", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lecoutre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Negrevergne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Yger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Asian Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "327--342", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lecoutre, A., Negrevergne, B., and Yger, F. (2017). Rec- ognizing Art Style Automatically in Painting with Deep Learning. In Proceedings of the Asian Conference on Machine Learning, pages 327-342.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Microsoft COCO: Common Objects in Context", |
|
"authors": [ |
|
{ |
|
"first": "T.-Y", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the European Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "740--755", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., and Zitnick, C. L. (2014). Mi- crosoft COCO: Common Objects in Context. In Pro- ceedings of the European Conference on Computer Vi- sion, pages 740-755.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "ROUGE: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "C.-Y", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of Workshop on Text Summarization Branches out, Post2Conference Workshop of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin, C.-Y. (2004). ROUGE: A package for automatic evaluation of summaries. In Proceedings of Workshop on Text Summarization Branches out, Post2Conference Workshop of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "CycleMatch: A cycle-consistent embedding network for image-text matching", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bakker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Lew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Pattern Recognition", |
|
"volume": "93", |
|
"issue": "", |
|
"pages": "365--379", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liu, Y., Guo, Y., Liu, L., Bakker, E. M., and Lew, M. S. (2019). CycleMatch: A cycle-consistent embedding network for image-text matching. Pattern Recognition, 93:365-379.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "TextRank: Bringing Order into Text", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Tarau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "404--411", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihalcea, R. and Tarau, P. (2004). TextRank: Bringing Or- der into Text. In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing, pages 404-411.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "BLEU: A Method for Automatic Evaluation of Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W.-J", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Papineni, K., Roukos, S., Ward, T., and Zhu, W.-J. (2002). BLEU: A Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th Annual Meet- ing on Association for Computational Linguistics, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Girshick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "91--99", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ren, S., He, K., Girshick, R., and Sun, J. (2015). Faster R- CNN: Towards Real-Time Object Detection with Region Proposal Networks. In Advances in Neural Information Processing Systems 28, pages 91-99.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Comparison of Named Entity Recognition Tools for Raw OCR Text", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Rodriquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bryant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Blanke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Luszczynska", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "410--414", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodriquez, K. J., Bryant, M., Blanke, T., and Luszczynska, M. (2012). Comparison of Named Entity Recognition Tools for Raw OCR Text. In Konvens, pages 410-414.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Hacking History via Event Extraction", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Segers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Van Erp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Van Der Meij", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Aroyo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Van Ossenbruggen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Schreiber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Wielinga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Oomen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Jacobs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 6th International Conference on Knowledge Capture", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Segers, R., Van Erp, M., Van Der Meij, L., Aroyo, L., van Ossenbruggen, J., Schreiber, G., Wielinga, B., Oomen, J., and Jacobs, G. (2011). Hacking History via Event Extraction. In Proceedings of the 6th International Con- ference on Knowledge Capture, pages 161-162.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "OmniArt: A Largescale Artistic Benchmark", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Strezoski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Worring", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACM Transactions on Multimedia Computing", |
|
"volume": "14", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Strezoski, G. and Worring, M. (2018). OmniArt: A Large- scale Artistic Benchmark. ACM Transactions on Multi- media Computing, Communications, and Applications, 14(4):88:1-88:21.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Artistic Object Recognition by Unsupervised Style Adaptation", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kovashka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Asian Conference on Computer Vision ACCV 2018", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "460--476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas, C. and Kovashka, A. (2019). Artistic Object Recognition by Unsupervised Style Adaptation. In Pro- ceedings of the Asian Conference on Computer Vision ACCV 2018, pages 460-476.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Linked Data for Libraries, Archives and Museums: How to Clean", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Van Hooland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Verborgh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Van Hooland, S. and Verborgh, R. (2014). Linked Data for Libraries, Archives and Museums: How to Clean, Link and Publish your Metadata.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Exploring Entity Recognition and Disambiguation for Cultural Heritage Collections", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Van Hooland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "De Wilde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Verborgh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Steiner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Van De Walle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Digital Scholarship in the Humanities", |
|
"volume": "30", |
|
"issue": "2", |
|
"pages": "262--279", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Van Hooland, S., De Wilde, M., Verborgh, R., Steiner, T., and Van de Walle, R. (2013). Exploring Entity Recog- nition and Disambiguation for Cultural Heritage Collec- tions. Digital Scholarship in the Humanities, 30(2):262- 279.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "CIDEr: Consensus-Based Image Description Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Vedantam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Lawrence Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4566--4575", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vedantam, R., Lawrence Zitnick, C., and Parikh, D. (2015). CIDEr: Consensus-Based Image Description Evaluation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4566- 4575.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Domain-Specific Keyphrase Extraction", |
|
"authors": [ |
|
{ |
|
"first": "Y.-F", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Bot", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "X", |

"middle": [], |

"last": "Chen", |

"suffix": "" |

} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 14th ACM International Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "283--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wu, Y.-f. B., Li, Q., Bot, R. S., and Chen, X. (2005). Domain-Specific Keyphrase Extraction. In Proceedings of the 14th ACM International Conference on Informa- tion and Knowledge Management, pages 283-284.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Neural Image Caption Generation with Visual Attention", |
|
"authors": [ |
|
{ |

"first": "K", |

"middle": [], |

"last": "Xu", |

"suffix": "" |

}, |

{ |

"first": "J", |

"middle": [], |

"last": "Ba", |

"suffix": "" |

}, |

{ |

"first": "R", |

"middle": [], |

"last": "Kiros", |

"suffix": "" |

}, |

{ |

"first": "K", |

"middle": [], |

"last": "Cho", |

"suffix": "" |

}, |

{ |

"first": "A", |

"middle": [], |

"last": "Courville", |

"suffix": "" |

}, |

{ |

"first": "R", |

"middle": [], |

"last": "Salakhutdinov", |

"suffix": "" |

}, |

{ |

"first": "R", |

"middle": [], |

"last": "Zemel", |

"suffix": "" |

}, |

{ |

"first": "Y", |

"middle": [], |

"last": "Bengio", |

"suffix": "" |

} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 32nd International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2048--2057", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Show, Attend and Tell: Neural Image Caption Genera- tion with Visual Attention. In Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, pages 2048-2057.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Classifying Digitized Art Type and Time Period", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Oh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Merchant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Howe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "West", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 1st Workshop on Data Science for Digital Art History-Tacking Big Data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang, S., Oh, B. M., Merchant, D., Howe, B., and West, J. (2018). Classifying Digitized Art Type and Time Period. In Proceedings of the 1st Workshop on Data Science for Digital Art History-Tacking Big Data.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Attention-Aware Multi-Stroke Style Transfer", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y.-J", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "J", |

"middle": [], |

"last": "Wang", |

"suffix": "" |

} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1467--1475", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yao, Y., Ren, J., Xie, X., Liu, W., Liu, Y.-J., and Wang, J. (2019). Attention-Aware Multi-Stroke Style Transfer. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1467-1475.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "BERTScore: Evaluating Text Generation with BERT", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Y", |

"middle": [], |

"last": "Artzi", |

"suffix": "" |

} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.09675" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhang, T., Kishore, V., Wu, F., Weinberger, K. Q., and Artzi, Y. (2019). BERTScore: Evaluating Text Gener- ation with BERT. arXiv:1904.09675 [cs].", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "MoverScore: Text Generation Evaluating with Contextualized Embeddings and Earth Mover Distance", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Peyrard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Eger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing(EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "563--578", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhao, W., Peyrard, M., Liu, F., Gao, Y., Meyer, C. M., and Eger, S. (2019). MoverScore: Text Generation Evalu- ating with Contextualized Embeddings and Earth Mover Distance. In Proceedings of the 2019 Conference on Em- pirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing(EMNLP-IJCNLP), pages 563-578.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "(2) The text analysis component extracts the most representative terms with help of NER and keyphrase identification. (3) In parallel, images on each page are localized and the image analysis", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Overview of proposed framework component extracts semantic meta-data from each image. (4) In the semantic alignment component, the results of step (1) and step (2) are embedded into a shared space and are used for matching and linking of the images to texts. Figure 1 provides a structural overview of the proposed framework.", |
|
"num": null, |
|
"type_str": "figure" |
|
} |
|
} |
|
} |
|
} |
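
The figure text above (FIGREF0, FIGREF1) describes the semantic alignment step: terms extracted from the text and meta-data extracted from the images are embedded into a shared space and matched. The following minimal sketch illustrates only what such a matching step could look like once both modalities are embedded; the encoders, the 128-dimensional space, and the cosine-similarity criterion are assumptions made for the example, not the authors' implementation.

import numpy as np

def cosine_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # Pairwise cosine similarity between rows of a (image embeddings)
    # and rows of b (text embeddings).
    a_norm = a / np.linalg.norm(a, axis=1, keepdims=True)
    b_norm = b / np.linalg.norm(b, axis=1, keepdims=True)
    return a_norm @ b_norm.T

def match_images_to_texts(image_embs: np.ndarray, text_embs: np.ndarray):
    # For each image, return the index of the best-matching description
    # and the corresponding similarity score.
    sims = cosine_matrix(image_embs, text_embs)
    return sims.argmax(axis=1), sims.max(axis=1)

# Hypothetical inputs: 3 images and 4 candidate descriptions, assumed to
# have been projected into a shared 128-d space by upstream models.
rng = np.random.default_rng(0)
image_embs = rng.normal(size=(3, 128))
text_embs = rng.normal(size=(4, 128))
best_idx, best_sim = match_images_to_texts(image_embs, text_embs)
for i, (j, s) in enumerate(zip(best_idx, best_sim)):
    print(f"image {i} -> description {j} (cosine {s:.3f})")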