|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:57:58.172554Z" |
|
}, |
|
"title": "Neural Machine Translation of Artwork Titles Using Iconclass Codes", |
|
"authors": [ |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Banar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Antwerp", |
|
"location": { |
|
"settlement": "Antwerp", |
|
"country": "Belgium" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Antwerp", |
|
"location": { |
|
"settlement": "Antwerp", |
|
"country": "Belgium" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Kestemont", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Antwerp", |
|
"location": { |
|
"settlement": "Antwerp", |
|
"country": "Belgium" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We investigate the use of Iconclass in the context of neural machine translation for NL\u2194EN artwork titles. Iconclass is a widely used iconographic classification system used in the cultural heritage domain to describe and retrieve subjects represented in the visual arts. The resource contains keywords and definitions to encode the presence of objects, people, events and ideas depicted in artworks, such as paintings. We propose a simple concatenation approach that improves the quality of automatically generated title translations for artworks, by leveraging textual information extracted from Iconclass. Our results demonstrate that a neural machine translation system is able to exploit this metadata to boost the translation performance of artwork titles. This technology enables interesting applications of machine learning in resource-scarce domains in the cultural sector.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We investigate the use of Iconclass in the context of neural machine translation for NL\u2194EN artwork titles. Iconclass is a widely used iconographic classification system used in the cultural heritage domain to describe and retrieve subjects represented in the visual arts. The resource contains keywords and definitions to encode the presence of objects, people, events and ideas depicted in artworks, such as paintings. We propose a simple concatenation approach that improves the quality of automatically generated title translations for artworks, by leveraging textual information extracted from Iconclass. Our results demonstrate that a neural machine translation system is able to exploit this metadata to boost the translation performance of artwork titles. This technology enables interesting applications of machine learning in resource-scarce domains in the cultural sector.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In the age of mass-digitization, cultural heritage institutions put significant effort in making their (meta) data available to developers and researchers. Artificial intelligence, and machine learning in particular, increasingly plays an important role in this process (Fiorucci et al., 2020) . Recent case studies have demonstrated successful applications of machine learning methods to cultural heritage collections. Most of this work relies on advances in computational methods and utilizes a modelling framework known as deep neural networks (LeCun et al., 2015; Schmidhuber, 2015) . However, such algorithms are dataintensive and require large annotated datasets, which recently have become available in some fields (Tiedemann, 2012; Krizhevsky et al., 2012; Lin et al., 2014) . These datasets contain millions of training items, which allowed researchers to achieve impressive results in many tasks. However, the construction of such materials in the domain of cultural heritage material is an even more expensive process, as it requires the intervention of highly-trained subject experts. Hence, many institutions can only offer smaller datasets, that contain just a fraction of the number of training examples that are needed to train a deep learning algorithms. Transfer learning is a common solution to overcome such a lack of training data (Ruder et al., 2019) . In neural machine translation (NMT), networks are nowadays commonly pre-trained on large generic datasets of parallel sentences, before they get fine-tuned on a more specific \"downstream\" corpus. Such networks, however, are conventionally only exposed to the actual sentence pairs in the target domain and are ignorant of additional knowledge that might be available such as, for example, iconographic metadata about objects and their relations. In the case of artworks, computational methods that can exploit such additional knowledge are highly appealing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 270, |
|
"end": 293, |
|
"text": "(Fiorucci et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 567, |
|
"text": "(LeCun et al., 2015;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 568, |
|
"end": 586, |
|
"text": "Schmidhuber, 2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 722, |
|
"end": 739, |
|
"text": "(Tiedemann, 2012;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 740, |
|
"end": 764, |
|
"text": "Krizhevsky et al., 2012;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 765, |
|
"end": 782, |
|
"text": "Lin et al., 2014)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1352, |
|
"end": 1372, |
|
"text": "(Ruder et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This work aims to apply NMT in the context of cultural heritage metadata using Iconclass (Vellekoop et al., 1973) as a source of external knowledge. Iconclass (see Section 3.1.1) contains keywords and definitions of subjects represented in artworks. We propose a simple approach to integrate this external knowledge in an NMT architecture for artwork titles to improve the translation performance. The structure of this paper is as follows. We first present the related work in Section 2. Then, we describe the datasets and present the applied methods in more detail in Section 3. Next, we present the results of our case study and discuss them in Section 4. Finally, we summarize our main contributions and findings with proposals for future work in Section 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 113, |
|
"text": "(Vellekoop et al., 1973)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Modern NMT systems nowadays often work at the level of an individual sentence pair and aim to translate a source sentence into a target sentence, without making use of additional information other than the source sentence itself. The idea to concatenate a source sentence with additional information, however, is not new. This preprocessing step is appealing due to its simplicity and model-agnostic applicability. Previous approaches in this respect are generally divided into 2 categories (see examples in Table 1 ): (i) extended context (Tiedemann and Scherrer, 2017) , where additional information in the source language is added to the source sentence (and sometimes to the target sentence); (ii) data augmentation (Bult\u00e9 and Tezcan, 2019) , where the source sentence is enriched with information in the target language. Tiedemann and Scherrer (2017) investigated the benefits of the extended context approach in attentionbased NMT for DE\u2192EN subtitles (see Table 1 ). The source sentence was concatenated with the previous source sentence and, then, the same technique was additionally applied to the target sentence. They used a special prefix to mark tokens belonging to the extended context. Although the improvement over the baseline was moderate, the NMT models were able to utilize the additional context and to distinguish it from the main sentence. In follow-up work, Bawden et al. (2018) designed EN\u2192FR test sets to investigate the usefulness of the previous source and target sentences in the context of NMT. They demonstrated that the concatenation strategy leads to improved performance. Agrawal et al. 2018applied the concatenation technique with a Transformer-based architecture to EN\u2192IT TED talks and experimentally varied the number of concatenated sentences included. There too, the extended context was demonstrated to be beneficial for Transformers. Junczys-Dowmunt (2019), finally, developed one of the best-performing systems based on the same idea in the context of the WMT19 news translation shared task for EN\u2192DE. Bult\u00e9 and Tezcan (2019) proposed a simple and efficient data augmentation method for NMT that yielded substantial performance improvements for EN\u2192NL and EN\u2192HU. The source sentence was concatenated with fuzzy matches, or sentences in the target language retrieved from a translation memory, that covered the entire training set. The fuzzy matches were selected on the basis of a simple similarity measurement between each source sentence and all other source sentences from the translation memory. Then, the fuzzy source sentences with a similarity score above a given threshold were stored with their corresponding target sentences. In a subsequent study, Jitao et al. (2020) improved the previously proposed method by explicitly informing models about any relevant tokens in the fuzzy matches and incorporating distributed sentence representations (see Table 1 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 540, |
|
"end": 570, |
|
"text": "(Tiedemann and Scherrer, 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 720, |
|
"end": 744, |
|
"text": "(Bult\u00e9 and Tezcan, 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 826, |
|
"end": 855, |
|
"text": "Tiedemann and Scherrer (2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1381, |
|
"end": 1401, |
|
"text": "Bawden et al. (2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 2043, |
|
"end": 2066, |
|
"text": "Bult\u00e9 and Tezcan (2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 508, |
|
"end": 515, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 962, |
|
"end": 969, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2897, |
|
"end": 2904, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Inspired by this previous work, we investigate the use of Iconclass in the context of artwork title translations. We use the definitions and keywords associated with Iconclass codes to extend and augment the artwork titles. Our main contribution is that we demonstrate that Iconclass definitions, when provided within a data augmentation strategy, improve translation performance. extended context source cc sieh cc , cc Bob cc ! -Wo sind sie? target -Where are they? data augmentation source How long does a cold last? || Combien de temps dure le vol?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "target Combien de temps dure un rhume? Table 1 : Examples of the extended context approach from Tiedemann and Scherrer (2017) and the data augmentation approach from Jitao et al. (2020). The special prefix cc indicates tokens from the extended context and the special token || separates the augmented sentence from the main and additional parts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 125, |
|
"text": "Tiedemann and Scherrer (2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 46, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section, we describe the datasets and the methods that we utilized in our research. We justify our choice for the particular NMT model used and provide details on the experimental settings and evaluation measures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Iconclass is an iconographic classification system used by stakeholders in the GLAM sector (Galleries, Libraries, Archives and Museums) to describe and retrieve subjects represented in the visual arts. Each subject represented in Iconclass is assigned a unique Iconclass code or identifier, that includes keywords and definitions in multiple languages (see Figure 1 ). In total, Iconclass contains a set of 28,000 hierarchically ordered definitions and 14,000 keywords. An Iconclass code starts with a digits ranging from 0 to 9 representing 10 main categories: (0) abstract art;", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 365, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Iconclass", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "(1-5) general topics; (6) history; (7) Bible; (8) literature; (9) classical mythology and ancient history. An Iconclass code can be further complemented by the options presented in (2) bracketed text to add the name of a specific entity;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Iconclass", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "(3) bracketed text with a plus-sign to add an additional 'shade of meaning'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Iconclass", |
|
"sec_num": "3.1.1" |
|
}, |
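
{

"text": "As an aside, the structure of these notations can be illustrated with a small, purely illustrative Python helper (not part of the original paper or of the Iconclass tooling) that splits a code such as 25F23(LION)(+12) into its base notation, the bracketed entity and the bracketed plus-sign qualifier:\n\ndef parse_code(code):\n    # e.g. '25F23(LION)(+12)' -> base '25F23', entity 'LION', shade '12'\n    base, entity, shade = code, None, None\n    if '(+' in base:\n        base, _, rest = base.partition('(+')\n        shade = rest.rstrip(')')\n    if '(' in base:\n        base, _, rest = base.partition('(')\n        entity = rest.rstrip(')')\n    return {'base': base, 'entity': entity, 'shade': shade}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Iconclass",

"sec_num": "3.1.1"

},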
|
{ |
|
"text": "The NL\u2194EN artwork dataset used below has been extracted from the database of the Netherlands Institute for Art History 2 . We deleted all duplicates from the dataset and finally obtained 21,988 sentence pairs, with the corresponding Iconclass codes for the artworks in question. We randomly selected 2,000 sentence pairs as a development set and included another 2,000 sentence pairs in the test set. The training set contains 1.35 \u00b1 0.72 Iconclass codes per sentence/title and we randomly sample one Iconclass code per sentence/title in the development and test sets. In this work, we do not exploit the hierarchical structure of Iconclass codes and leave this worthwhile option to future work. Additional details about the datasets are provided in Table 3 . We experimented with 4 different concatenation strategies. Each source sentence s i (in English or Dutch) was concatenated using bracketed tags to the corresponding English description d en i or set of keywords in English k en i or Dutch Table 3 : Statistics of the dataset: mean and standard deviation in sentence lengths.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 750, |
|
"end": 757, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 998, |
|
"end": 1005, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Artwork dataset", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "k nl i : (1) s i (txs) d en i (a) 11F25 (b) 31A235 (c) 11I35", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Artwork dataset", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "(txe); (2) s i (kws) k en i (kwe); (3) s i (kws) k nl i (kwe); (4) s i (txs) d en i (txe) (kws) k en i (kwe). If a sentence from the training set has more than one Iconclass code, then the sentence is separately matched with each Iconclass code using one of the concatenations as shown in Table 5 . We balance each concatenated sentence by adding its original version to the training and development sets in order to make the models learn both types of sentences equally well. Additionally, we use two versions of the test set in our evaluation: (1) a baseline test set, where we use original sentences without any concatenations; (2) the test set as concatenated with the corresponding additional information.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 296, |
|
"text": "Table 5", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Artwork dataset", |
|
"sec_num": "3.1.2" |
|
}, |
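
{

"text": "To make the four strategies concrete, the following minimal Python sketch (an illustrative helper of our own, not code released with this paper) builds the concatenated source sentences, reusing the tag tokens (txs), (txe), (kws) and (kwe) introduced above:\n\ndef concat_variants(title, def_en, kw_en, kw_nl):\n    # def_en: English Iconclass definition; kw_en / kw_nl: English / Dutch keyword lists\n    kws_en = ', '.join(kw_en)\n    kws_nl = ', '.join(kw_nl)\n    return [\n        title + ' (txs) ' + def_en + ' (txe)',  # (1) English definition\n        title + ' (kws) ' + kws_en + ' (kwe)',  # (2) English keywords\n        title + ' (kws) ' + kws_nl + ' (kwe)',  # (3) Dutch keywords\n        title + ' (txs) ' + def_en + ' (txe) (kws) ' + kws_en + ' (kwe)',  # (4) definition and keywords\n    ]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Artwork dataset",

"sec_num": "3.1.2"

},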
|
{ |
|
"text": "Pre-training has become a common solution to cope with small datasets across many domains in deep learning (Ruder et al., 2019) . In our experiments, we used 1,777,653 sentence pairs extracted from the Europarl corpus (Tiedemann, 2012) for the NL\u2194EN language pair, in order to pre-train the models on a generic background corpus. We randomly selected 3,000 sentence pairs as a development set and 3,000 sentence pairs in the test set respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 127, |
|
"text": "(Ruder et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 235, |
|
"text": "(Tiedemann, 2012)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "Banar et al. (2020) demonstrated the advantages of character-level translation over the subword-level approach for artwork titles. Hence, we exclusively resorted to character-level models in the present work. However, Banar et al. (2020) used a fusion of recurrent and convolutional models (Lee et al., 2017) that has become outdated. The recent emergence of NMT models started with recurrent neural networks such as GRU or LSTM memory cells Sutskever et al., 2014; Luong et al., 2015; , but since then it has been established that Transformer-based architectures (Vaswani et al., 2017) persuasively outperform recurrent and convolutional models across various tasks. The Transformer model mitigates some of the limitations of recurrent and convolutional models; the Transformer, for example, includes self-attention mechanisms that can access all positions in a previous layer. Therefore, the receptive field is not as myopic as with convolutional models. Additionally, the absence of recurrent connections allows one to make the training process fully parallellizable. Therefore, such models nowadays are more appealing for our problem.", |
|
"cite_spans": [ |
|
{ |
|
"start": 218, |
|
"end": 237, |
|
"text": "Banar et al. (2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 308, |
|
"text": "(Lee et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 442, |
|
"end": 465, |
|
"text": "Sutskever et al., 2014;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 485, |
|
"text": "Luong et al., 2015;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 564, |
|
"end": 586, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Details", |
|
"sec_num": "3.2" |
|
}, |
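
{

"text": "To make the contrast with convolutional receptive fields explicit, the following short, framework-free Python sketch (an illustration of the standard scaled dot-product attention of Vaswani et al. (2017), not the exact implementation used in our experiments) shows that every output position attends over all input positions at once:\n\nimport math\n\ndef scaled_dot_product_attention(Q, K, V):\n    # Q, K, V: lists of equal-length vectors, one per position\n    d = len(K[0])\n    out = []\n    for q in Q:\n        scores = [sum(qi * ki for qi, ki in zip(q, k)) / math.sqrt(d) for k in K]\n        m = max(scores)\n        weights = [math.exp(s - m) for s in scores]\n        z = sum(weights)\n        weights = [w / z for w in weights]\n        # weighted sum over *all* value vectors, i.e. an unrestricted receptive field\n        out.append([sum(w * v[j] for w, v in zip(weights, V)) for j in range(len(V[0]))])\n    return out",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Details",

"sec_num": "3.2"

},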
|
{ |
|
"text": "As character-level translation works better for the translation of artwork titles (Banar et al., 2020) , we applied a four-layer character-level Transformer (Vaswani et al., 2017) , implemented in the OpenNMTpy framework (Klein et al., 2017) . The vocabulary size was set to 300 characters and the length of sentences was limited to 450 characters. The models were trained by minimizing the negative conditional log-likelihood using the Adam optimizer (Kingma and Ba, 2014) with a batch size of 6,144 tokens and an accumulation count of 4. First, we pre-train the models on the general corpus and, then, fine-tune them on our domain-specific corpus. Each model was trained on a single GeForce GTX 1080 Ti with 11 GB RAM. In the pre-training phase, the models were initialized using the method proposed by Glorot and Bengio (2010) and trained for 100,000 updates using the Noam decay schedule (Popel and Bojar, 2018) with an initial learning rate of 2. In the fine-tuning phase, the initial learning rate was set to 0.0001. The fine-tuning was interrupted as soon as the validation loss did not decrease for 600 updates. In the decoding part of the architecture, we applied a beam search with a beam size of 25. The evaluation was conducted using three standard metrics: CHARACTER 3 (Wang et al., 2016) , CHRF 4 (Popovi\u0107, 2015) and BLEU-4 (Papineni et al., 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 102, |
|
"text": "(Banar et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 179, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 241, |
|
"text": "(Klein et al., 2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 805, |
|
"end": 829, |
|
"text": "Glorot and Bengio (2010)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 892, |
|
"end": 915, |
|
"text": "(Popel and Bojar, 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1282, |
|
"end": 1301, |
|
"text": "(Wang et al., 2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1311, |
|
"end": 1326, |
|
"text": "(Popovi\u0107, 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1331, |
|
"end": 1361, |
|
"text": "BLEU-4 (Papineni et al., 2002)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Inference Details", |
|
"sec_num": "3.3" |
|
}, |
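
{

"text": "For reference, the Noam decay schedule mentioned above follows the form introduced by Vaswani et al. (2017): the learning rate grows linearly during a warm-up period and then decays proportionally to the inverse square root of the step number, scaled by an initial factor (2 in our pre-training phase). The sketch below is a minimal illustration; the model dimension and the number of warm-up steps are not reported in this paper, so the values shown are purely illustrative:\n\ndef noam_lr(step, d_model=512, warmup=8000, factor=2.0):\n    # Noam schedule: linear warm-up followed by inverse-square-root decay\n    step = max(step, 1)\n    return factor * d_model ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training and Inference Details",

"sec_num": "3.3"

},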
|
{ |
|
"text": "We present our quantitative results in Section 4.1. We divide our experimental results into two different sections. First, we assess the use of the extended context in Section 4.1.1. Second, we compare the various data augmentation strategies to the baseline in Section 4.1.2. In Section 4.2 we manually inspect a selection of outputs for the best performing model. Table 6 : Results of the experiments. The type 'EC' and 'DA' correspond to experiments with extended context and data augmentation approaches respectively. The arrows near the metrics in the column labels indicate the desired direction of improvement (i.e. whether a higher/lower score for this metric is better). The 'baseline' experiment corresponds to the models fine-tuned without any additional information. The columns 'Test with context' and 'Baseline test' correspond to translation of the sentences enriched with additional information and without it, correspondingly.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 366, |
|
"end": 373, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "From Table 6 we can see that the models (b, h) fine-tuned with the context extended by keywords demonstrate comparable results to the baseline models (a, f) in the baseline test. Model (h) slightly outperforms the associated baseline, while model (b) is slightly worse in this testing scenario. The testing scenario with the extended context suggests that these models do not successfully manage to exploit the additional context, as there is no obvious boost in performance. For model (h), we can even observe a subtle decrease in performance. As the definitions were only available in English, the context has been extended by definitions only for EN\u2192NL (see the models i and j). These models still show comparable performance to the baseline model (f) when translating without the extended context. The testing scenario with the extended context is also not beneficial. Therefore, we observe that the extended context in the testing phase is generally not advantageous and, hence, we conclude that the use of these concatenations is not beneficial in our case.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extended Context", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "As shown in Table 6 , the models (c, g) fine-tuned with keywords as a source of data augmentation show comparable results to the baseline models (a, f) in the baseline testing scenario. Therefore, we conclude that the source sentences augmented by keywords are not helpful for our task. The models (d, e) utilize definitions for the data augmentation for NL\u2192EN. These models demonstrate slightly worse performance compared to the baseline model (a) when translating without data augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "Translating with the source sentences augmented by definitions, however, boosts the performance of these models and we observe substantial improvement over baseline model (a) and their own non-augmented counterparts. We manually inspected the output of the model (d) and discuss some interpretive observations in the next Section 4.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "In this section, we qualitatively discuss the outputs of the best performing model (d) in the testing scenario with data augmentation from Table 6 . We compare its output to that for the basic test setting, as well as to baseline model (a). We divide our findings into four main categories below.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 146, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Lexical choices. The model is able to exploit additional information in the target language in order to select words closer to target translations. From Figure 2a , we can see that the target translation is not literal and the word Man is absent in the Dutch counterpart. The literal translation of this sentence is Creation and Fall. In this case, baseline model (a) and model (d) in the basic testing scenario generate non-literal translations with the additional post-modification of mankind. The translation is close to the target, but it is still erroneous. Model (d) pays attention to the phrase creation of man from the Iconclass definition and decides to adopt the word Man instead of the word mankind as in the baseline and in the case without data augmentation. Detection of lexical units. As mentioned in Section 3.1.1, Iconclass definitions describe subjects represented in artwork images. If a subject is widely represented in iconography, an artwork can even have the same title as an Iconclass definition or the definition can at least contain parts of the target translations. Hence, these target translations may be detected in an Iconclass definition and just copied by model. In Figure 2b , we can see that the baseline model (a) and model (d) in the basic testing scenario produce grammatically incorrect outputs. The Dutch fixed expression wordt verliefd op is translated almost literally as being loved on. However, we can see that model (d) finds a part of the right translation in the additional information and copies it. Named entities. Artwork titles densely feature named entities in comparison to general corpora and, hence, they can be a serious issue for NMT models. Similarly to Banar et al. (2020) , we observe that the models in the basic testing scenario tend to copy named entities instead of attempting a proper translation. However, we observe that if a correct named entity is provided in the additional information, the model is able to generate the correct target translation. From Figure 2c , we can see that model (d) derives the correct named entity Ulysses, while other scenarios are less successful. Lack of context. The titles of artworks naturally differ from the sentences in more general corpora and can be very short. The lack of context may cause translation difficulties. In the example from Figure 2d , the title consists of only one word. The baseline model (a) and model (d) in the basic testing scenario struggle to translate the title Pride correctly and generate the non-sense translations Events and Christ, correspondingly. In this case, the additional information helps model (d) to generate the right answer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1711, |
|
"end": 1730, |
|
"text": "Banar et al. (2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 162, |
|
"text": "Figure 2a", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1198, |
|
"end": 1207, |
|
"text": "Figure 2b", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 2023, |
|
"end": 2032, |
|
"text": "Figure 2c", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 2345, |
|
"end": 2354, |
|
"text": "Figure 2d", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this paper, we utilized the Iconclass framework as a source of additional information for NMT of artwork titles. We extracted English and Dutch keywords and English definitions from the Iconclass codes. This information was concatenated to the source sentences. Experiments show that augmenting the source sentences with the Iconclass definitions for the objects under scrutiny improves the overall translation quality by a considerable margin. On the basis of a manual inspection of the output, we argue that the NMT model is able to successfully capitalize on the additional information extracted from the Iconclass definitions. There are various reasons for this. Firstly, the model is able to recognize any named entities in the concatenated part (that are lacking in the actual source title) and it can correctly inject them in the translation. Secondly, the data augmentation approach improves the lexical aspects of the translation, providing useful semantic cues in the case of limited context. Thirdly, the model is able to detect correct translations in the concatenated part and integrate them appropriately in the translation. However, the augmentation of the source sentences with the keywords and any type of extended context that we applied do not show any promising results. In future work, we would like to extend the current pipeline with a model that automatically matches the source sentence with the corresponding Iconclass code. In addition, it may be beneficial to incorporate visual features extracted from artworks and, hence, to perform a multi-modal matching. Iconclass contains 28,000 hierarchically ordered definitions that makes the matching problem extremely sophisticated. We plan to investigate the feasibility of such a matching strategy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://labs.brill.com/ictestset/ 2 https://rkd.nl/en/explore/images", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/rwth-i6/CharacTER 4 https://github.com/m-popovic/chrF", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Contextual handling in neural machine translation: Look behind, ahead and on both sides", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Ruchit Rajeshkumar Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Turchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Negri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "21st Annual Conference of the European Association for Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruchit Rajeshkumar Agrawal, Marco Turchi, and Matteo Negri. 2018. Contextual handling in neural machine translation: Look behind, ahead and on both sides. In 21st Annual Conference of the European Association for Machine Translation, pages 11-20.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.0473" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Transfer learning for digital heritage collections: Comparing neural machine translation at the subword-level and character-level", |
|
"authors": [ |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Banar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karine", |
|
"middle": [], |
|
"last": "Lasaracina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Kestemont", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th International Conference on Agents and Artificial Intelligence", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "522--529", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolay Banar, Karine Lasaracina, Walter Daelemans, and Mike Kestemont. 2020. Transfer learning for digital heritage collections: Comparing neural machine translation at the subword-level and character-level. In Pro- ceedings of the 12th International Conference on Agents and Artificial Intelligence -Volume 1: ARTIDIGH,, pages 522-529. INSTICC, SciTePress.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Evaluating discourse phenomena in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Bawden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "16th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1304--1313", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel Bawden, Rico Sennrich, Alexandra Birch, and Barry Haddow. 2018. Evaluating discourse phenomena in neural machine translation. In 16th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1304-1313.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Neural fuzzy repair: Integrating fuzzy matches into neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Bram", |
|
"middle": [], |
|
"last": "Bult\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arda", |
|
"middle": [], |
|
"last": "Tezcan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "57th Conference of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1800--1809", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bram Bult\u00e9 and Arda Tezcan. 2019. Neural fuzzy repair: Integrating fuzzy matches into neural machine transla- tion. In 57th Conference of the Association for Computational Linguistics (ACL), pages 1800-1809.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "On the properties of neural machine translation: Encoder-decoder approaches", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of SSST-8, Eighth Workshop on Syntax, Semantics and Structure in Statistical Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "103--111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart van Merri\u00ebnboer, Dzmitry Bahdanau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder approaches. In Proceedings of SSST-8, Eighth Workshop on Syntax, Semantics and Structure in Statistical Translation, pages 103-111.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Machine learning for cultural heritage: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Fiorucci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Khoroshiltseva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimiliano", |
|
"middle": [], |
|
"last": "Pontil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arianna", |
|
"middle": [], |
|
"last": "Traviglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessio", |
|
"middle": [], |
|
"last": "Del Bue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Pattern Recognition Letters", |
|
"volume": "133", |
|
"issue": "", |
|
"pages": "102--108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Fiorucci, Marina Khoroshiltseva, Massimiliano Pontil, Arianna Traviglia, Alessio Del Bue, and Stuart James. 2020. Machine learning for cultural heritage: A survey. Pattern Recognition Letters, 133:102-108.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Understanding the difficulty of training deep feedforward neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Glorot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the thirteenth international conference on artificial intelligence and statistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "249--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understanding the difficulty of training deep feedforward neural net- works. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pages 249-256.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Boosting neural machine translation with similar translations", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Xu Jitao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Josep", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Crego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1580--1590", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "XU Jitao, Josep M Crego, and Jean Senellart. 2020. Boosting neural machine translation with similar translations. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1580-1590.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Microsoft translator at wmt 2019: Towards large-scale document-level neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "225--233", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt. 2019. Microsoft translator at wmt 2019: Towards large-scale document-level neural machine translation. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 225-233.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "OpenNMT: Opensource toolkit for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuntian", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senellart, and Alexander M. Rush. 2017. OpenNMT: Open- source toolkit for neural machine translation. In Proc. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Imagenet classification with deep convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1097--1105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. 2012. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pages 1097-1105.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Deep learning. nature", |
|
"authors": [ |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "521", |
|
"issue": "", |
|
"pages": "436--444", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. 2015. Deep learning. nature, 521(7553):436-444.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Fully character-level neural machine translation without explicit segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "365--378", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Lee, Kyunghyun Cho, and Thomas Hofmann. 2017. Fully character-level neural machine translation with- out explicit segmentation. Transactions of the Association for Computational Linguistics, 5:365-378.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Microsoft coco: Common objects in context", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deva", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "740--755", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Effective approaches to attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh-Thang Luong, Hieu Pham, and Christopher D Manning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1412-1421.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting on association for computational linguistics, pages 311-318. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Training tips for the transformer model", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Popel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The Prague Bulletin of Mathematical Linguistics", |
|
"volume": "110", |
|
"issue": "1", |
|
"pages": "43--70", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Popel and Ond\u0159ej Bojar. 2018. Training tips for the transformer model. The Prague Bulletin of Mathemat- ical Linguistics, 110(1):43-70.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "chrf: character n-gram f-score for automatic mt evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Tenth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "392--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovi\u0107. 2015. chrf: character n-gram f-score for automatic mt evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Brill iconclass ai test set", |
|
"authors": [ |
|
{ |
|
"first": "Etienne", |
|
"middle": [], |
|
"last": "Posthumus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Etienne Posthumus. 2020. Brill iconclass ai test set. https://labs.brill.com/ictestset/.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Transfer learning in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Matthew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Tutorials", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "15--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder, Matthew E Peters, Swabha Swayamdipta, and Thomas Wolf. 2019. Transfer learning in natural language processing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Tutorials, pages 15-18.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Deep learning in neural networks: An overview", |
|
"authors": [ |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Neural networks", |
|
"volume": "61", |
|
"issue": "", |
|
"pages": "85--117", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00fcrgen Schmidhuber. 2015. Deep learning in neural networks: An overview. Neural networks, 61:85-117.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Advances in neural information processing systems, pages 3104-3112.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Neural machine translation with extended context", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Scherrer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Third Workshop on Discourse in Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "82--92", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann and Yves Scherrer. 2017. Neural machine translation with extended context. In Proceedings of the Third Workshop on Discourse in Machine Translation, pages 82-92.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Parallel data, tools and interfaces in opus", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Lrec", |
|
"volume": "2012", |
|
"issue": "", |
|
"pages": "2214--2218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and interfaces in opus. In Lrec, volume 2012, pages 2214-2218.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Iconclass : an iconographic classification system", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Vellekoop", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Tholen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Couprie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1973, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Vellekoop, E. Tholen, and L. D. Couprie. 1973. Iconclass : an iconographic classification system. North- Holland Pub. Co., Amsterdam.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Character: Translation edit rate on character level", |
|
"authors": [ |
|
{ |
|
"first": "Weiyue", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan-Thorsten", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Rosendahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "505--510", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weiyue Wang, Jan-Thorsten Peter, Hendrik Rosendahl, and Hermann Ney. 2016. Character: Translation edit rate on character level. In Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers, pages 505-510.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Example translations with attention maps for the model that uses English Iconclass definitions for data augmentation. We also provide the target sentence, translation of the model without data augmentation (Without DA) and translation of the baseline model (Baseline) fine-tuned without additional information. (a) Target: 'Creation and the fall of Man'. Without DA: 'Creation and a fall of mankind'. Baseline: 'Creation and a fall of mankind'. (b) Target: 'Mercury falls in love with Herse'. Without DA: 'Mercury being loved on Herse' Baseline: 'Mercury being loved on Herse'. (c) Target: 'Circe begging Ulysses to save her life'. Without DA: 'Circe pleading with Odysseus to save her life'. Baseline: 'Circe pleads with Odysseus to save her life in her life'. (d) Target: 'Pride'. Without DA: 'Christ'. Baseline: 'Events'.", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>N Extension</td><td>Definition</td><td>Keywords</td></tr><tr><td>1 71H7</td><td colspan=\"2\">David and Bathsheba (2 Samuel 11-12) Bathsheba, Samuel-2 11-12</td></tr><tr><td>71H71</td><td>David, from the roof (or balcony) of his</td><td>balcony, bathing, love at first sight,</td></tr><tr><td/><td>palace, sees Bathsheba bathing</td><td>palace, roof, spying</td></tr><tr><td>2 25G41</td><td>flowers</td><td>flower</td></tr><tr><td>25G41(ROSE)</td><td>flowers: rose</td><td>rose</td></tr><tr><td>3 25F23(LION)</td><td colspan=\"2\">beasts of prey, predatory animals: lion lion</td></tr><tr><td colspan=\"2\">25F23(LION)(+12) beasts of prey, predatory animals: lion</td><td>Wappentier, araldica, heraldisches</td></tr><tr><td/><td>(+ heraldic animals)</td><td>Symbol, heraldry, h\u00e9raldique, lion</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"num": null, |
|
"text": "Figure 1: Examples of images assigned Iconclass codes fromPosthumus (2020). The Iconclass code 11F25 provides the English definition 'Mater Dolorosa', the set of English keywords 'Mater Dolorosa, bust, full length, half-length, head , mother' and the set of the Dutch keywords 'Mater Dolorosa, buste, full length , half-length, hoofd, moeder'. The Iconclass code 31A235 is less informative with the definition 'sitting figure' and only one English keyword 'sitting'.The Iconclass code 11I35 only has an English definition: 'other groups of apostles'. EN sentence NL sentence EN description EN keywords NL keywords Train 52.01 \u00b1 29.64 53.75 \u00b1 29.86 51.80 \u00b1 34.60 26.58 \u00b1 21.48 26.85 \u00b1 21.78 Dev 51.90 \u00b1 29.90 53.81 \u00b1 30.37 52.20 \u00b1 34.75 26.42 \u00b1 21.65 26.84 \u00b1 21.65 Test 51.76 \u00b1 29.67 52.61 \u00b1 35.05 52.61 \u00b1 35.05 26.66 \u00b1 21.10 26.95 \u00b1 21.41", |
|
"content": "<table><tr><td>Split</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"num": null, |
|
"text": "Abas, Ascalabus, or Stellio) laughs at Ceres, because she drinks too avidly while she is resting at an old woman's house English keywords Ascalabus, boy, drinking, laughing, old woman, thirst Dutch keywords Ascalabus, dorst, drinken, jongen, lachend, old woman", |
|
"content": "<table><tr><td>source</td><td>Ceres bespot door Stellio (Metamorphosen 5: 446-461)</td></tr><tr><td>target</td><td>Mocking of Ceres by Stellio (Metamorphoses 5: 446-461)</td></tr><tr><td colspan=\"2\">English definition a little boy (concatenation 1 Ceres bespot door Stellio (Metamorphosen 5: 446-461) (txs) a little boy (Abas,</td></tr><tr><td/><td>Ascalabus, or Stellio) laughs at Ceres, because she drinks too avidly while she</td></tr><tr><td/><td>is resting at an old woman's house (txe)</td></tr><tr><td>concatenation 2</td><td>Ceres bespot door Stellio (Metamorphosen 5: 446-461) (kws) Ascalabus, boy,</td></tr><tr><td/><td>drinking, laughing, old woman, thirst (kwe)</td></tr><tr><td>concatenation 3</td><td>Ceres bespot door Stellio (Metamorphosen 5: 446-461) (kws) Ascalabus,</td></tr><tr><td/><td>dorst, drinken, jongen, lachend, old woman (kwe)</td></tr><tr><td>concatenation 4</td><td>Ceres bespot door Stellio (Metamorphosen 5: 446-461) (txs) a little boy (Abas,</td></tr><tr><td/><td>Ascalabus, or Stellio) laughs at Ceres, because she drinks too avidly while</td></tr><tr><td/><td>she is resting at an old woman's house (txe)(kws) Ascalabus, boy, drinking,</td></tr><tr><td/><td>laughing, old woman, thirst (kwe)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"num": null, |
|
"text": "Example concatenations of a title and the information from the Iconclass code 92M132.", |
|
"content": "<table><tr><td>source</td><td>Johannes de Doper en de H. Hieronymus</td></tr><tr><td>target</td><td>John the Baptist and St. Jerome</td></tr><tr><td>definition 1</td><td>the monk and hermit Jerome (Hieronymus); possible attributes: book, cardi-</td></tr><tr><td/><td>nal's hat, crucifix, hour-glass, lion, skull, stone</td></tr><tr><td>definition 2</td><td>John the Baptist; possible attributes: book, reed cross, baptismal cup, honey-</td></tr><tr><td/><td>comb, lamb, staff</td></tr><tr><td colspan=\"2\">training sentence 1 Johannes de Doper en de H. Hieronymus (txs) the monk and hermit Jerome</td></tr><tr><td/><td>(Hieronymus); possible attributes: book, cardinal's hat, crucifix, hour-glass,</td></tr><tr><td/><td>lion, skull, stone (txe)</td></tr><tr><td colspan=\"2\">training sentence 2 Johannes de Doper en de H. Hieronymus (txs) John the Baptist; possible at-</td></tr><tr><td/><td>tributes: book, reed cross, baptismal cup, honeycomb, lamb, staff (txe)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |