|
{ |
|
"paper_id": "L16-1003", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:07:55.132180Z" |
|
}, |
|
"title": "Enhancing Access to Online Education: Quality Machine Translation of MOOC Content", |
|
"authors": [ |
|
{ |
|
"first": "Valia", |
|
"middle": [], |
|
"last": "Kordoni", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Humboldt University", |
|
"location": { |
|
"settlement": "Berlin", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Antal", |
|
"middle": [], |
|
"last": "Van Den Bosch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Radboud University", |
|
"location": { |
|
"settlement": "Nijmegen", |
|
"country": "Netherlands" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Katia", |
|
"middle": [], |
|
"last": "Kermanidis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ionian University", |
|
"location": { |
|
"settlement": "Corfu", |
|
"country": "Greece" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Vilelmini", |
|
"middle": [], |
|
"last": "Sosoni", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ionian University", |
|
"location": { |
|
"settlement": "Corfu", |
|
"country": "Greece" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kostadin", |
|
"middle": [], |
|
"last": "Cholakov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Humboldt University", |
|
"location": { |
|
"settlement": "Berlin", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Iris", |
|
"middle": [], |
|
"last": "Hendrickx", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Radboud University", |
|
"location": { |
|
"settlement": "Nijmegen", |
|
"country": "Netherlands" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Huck", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Edinburgh", |
|
"location": { |
|
"country": "UK" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Dublin City University", |
|
"location": { |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The present work is an overview of the TraMOOC (Translation for Massive Open Online Courses) research and innovation project, a machine translation approach for online educational content. More specifically, videolectures, assignments, and MOOC forum text is automatically translated from English into eleven European and BRIC languages. Unlike previous approaches to machine translation, the output quality in TraMOOC relies on a multimodal evaluation schema that involves crowdsourcing, error type markup, an error taxonomy for translation model comparison, and implicit evaluation via text mining, i.e. entity recognition and its performance comparison between the source and the translated text, and sentiment analysis on the students' forum posts. Finally, the evaluation output will result in more and better quality in-domain parallel data that will be fed back to the translation engine for higher quality output. The translation service will be incorporated into the Iversity MOOC platform and into the VideoLectures.net digital library portal.", |
|
"pdf_parse": { |
|
"paper_id": "L16-1003", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The present work is an overview of the TraMOOC (Translation for Massive Open Online Courses) research and innovation project, a machine translation approach for online educational content. More specifically, videolectures, assignments, and MOOC forum text is automatically translated from English into eleven European and BRIC languages. Unlike previous approaches to machine translation, the output quality in TraMOOC relies on a multimodal evaluation schema that involves crowdsourcing, error type markup, an error taxonomy for translation model comparison, and implicit evaluation via text mining, i.e. entity recognition and its performance comparison between the source and the translated text, and sentiment analysis on the students' forum posts. Finally, the evaluation output will result in more and better quality in-domain parallel data that will be fed back to the translation engine for higher quality output. The translation service will be incorporated into the Iversity MOOC platform and into the VideoLectures.net digital library portal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Massive Open Online Courses (MOOCs) have been growing in impact and popularity in recent years. According to 2013 statistics 1 , more than 200 universities around the globe are involved in their creation, with the participation of more than 1,300 instructors, more than 1,200 courses on offer and around 10 million users being actively enrolled. Apart from their significant contribution to lifelong education, MOOCs are viewed as a tool to help identify and fill the gap that exists in the digital skills of workers across Europe. However, the biggest obstacle standing in the way of further growth in online courses is the language barrier, given that the vast majority of such courses are offered in English. Although the need for translating MOOC content has been acknowledged by the majority of course providers 2 , the solutions provided so far have been fragmentary, human-based, and implemented off-line. TraMOOC 3 constitutes a solution to online course content translation that aims at eleven target languages, is automatic -i.e. it is based on statistical machine translation (SMT) techniques-and is therefore easily extendable to other languages, is adaptable to various types of educational content genre, is independent of course domain, and is designed to produce translations online via its integration in the use-case platforms. The remainder of this paper is organized as follows.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "1 https://www.edsurge.com/n/2013-12-22-moocs-in-2013-breaki ng-down-the-numbers 2 http://blog.coursera.org/post/50452652317/coursera-partnering -with-top-global-organizations 3 www.tramooc.eu Section 2 provides a brief overview of the project. Section 3 describes the collected data, and their domain-and genre-specific idiosyncrasies, while Section 4 reports some preliminary translation results. The crowdsourcing tasks involved are presented in Section 5, and the multimodal evaluation schemata and the end product use case plans are described in Sections 6 and 7, respectively.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "TraMOOC aims at developing high-quality translation of all types of text genre included in MOOCs (e.g. assignments, tests, presentations, lecture subtitles, forum text) from English into eleven European and BRIC languages, i.e. DE, IT, PT, EL, NL, CS, BG, CR, PL, RU, ZH that constitute strong use-cases, many of which are hard to translate into and have relatively weak MT support. Phrase-based and syntax-based SMT models are developed to address language diversity and support the language-independent nature of the methodology. For high-quality MT and to add value to existing infrastructure, extensive advanced bootstrapping of new resources is performed, while at the same time innovative multi-modal automatic and human evaluation schemata are applied. For human evaluation, an innovative, strict-access control, time-and cost-efficient crowdsourcing set-up is used, while translation experts, domain experts and end users are also involved. Results are combined into a feedback vector and used to refine parallel data and retrain translation models towards a more accurate second-phase translation output. The project results will be showcased and tested on the Iversity MOOC platform and on the VideoLectures.NET digital video lecture library.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of TraMOOC", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "The translation engine employed in TraMOOC is Moses 4 , the most widely used SMT toolkit available in academia as well as in commercial environments, mainly due to its flexibility, modularity, open-source licence, and competitive translation results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of TraMOOC", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "TraMOOC employs a fine-grained definition of the term educational data and defines domains in terms of subject areas and text types. Regarding the latter, we make a distinction between formal (lectures subtitles, presentations, assignments and tests) and informal (forum data) domains. Regarding the former, we divide the data into scientific and general-domain. Such distinctions are extremely important, especially with respect to out-of-domain data used to train the SMT system. Although not being strictly educational, parts of the out-of-domain data contain domain-specific terms and phrases which also occur in the TraMOOC domains. For example, out-of-domain parallel corpora derived from OpenOffice and PHP manuals are likely to contain terms which are also frequently used in scientific MOOCs on programming and other IT topics. To date, there are very few parallel educational corpora for the eleven languages targeted in TraMOOC that are readily available to train SMT systems. One of the significant scientific contributions of TraMOOC is to produce parallel corpora from the educational domain and make them publicly available after the end of the project. The lack of in-domain parallel data is a real challenge. SMT systems heavily rely on the domains of the parallel data used for training. TraMOOC addresses this issue in two ways: (i) crawling multilingual educational web resources in order to seed the building of parallel corpora, and (ii) building parallel corpora via crowdsourced translation of educational texts. Additionally, we are also exploring the possibility of using parallel corpora from other, closely related languages. For example, we are currently experimenting with adding Serbian parallel data to the already available Croatian parallel corpora. In this way, we can increase the amount of parallel data available for low-resourced languages such as Croatian. \nTranslated subtitles and other course materials from the Coursera education platform 5 have been one of the major sources for the crawling of parallel educational data 6 . The data compiled so far contains translated materials from over 250 courses offered by Coursera. We have managed to extract translations for all eleven languages targeted in TraMOOC, but the size of the available parallel data varies strongly for each language pair. While for German and Italian there are over 2 million words of parallel data, for Czech and Greek there are only about 200,000 words available. The translations are produced by course participants who usually translate into their native languages. Translations are done via the Transifex platform 7 . We have developed Python scripts which download the source texts (STs) and the target texts (TTs), i.e. the translations, from Transifex automatically. Since every translation contains a language code, we can easily extract only translations to one of the eleven target languages. Sometimes, there are multiple translations available for a single English segment. In most such cases, Coursera users have voted on the quality of the various translations available and we extract the translation with the highest number of votes. Regarding the quality of the translations one should keep in mind that this is basically a crowdsourced corpus. Apart from users voting on the quality of the translation, there is hardly any other mechanism for quality assurance. Therefore, we have implemented some basic heuristics for pre-processing and quality checks. For example, we filter out very short segments such as music, silence, applause, etc. Further, we check the length of the source and translated segments. Parallel segments with very large differences in their length are considered dubious and are removed from the corpus. These are mostly segments which for some reason were translated in a wrong language. \nIt is worth noting that Transifex is primarily used for translating subtitles. We found out that the majority of segments represent real, well-formed sentences but the sentences are usually short. Sentence segmentation is therefore generally good, although there are some segments which do not represent well-formed sentences. Furthermore, there are ongoing experiments with web resources of the EU which can be exploited for all EU languages in the project, e.g. the EU Teacher's Corner 8 . It includes e-books and other electronic educational materials available in many of the 24 official EU languages. All materials aim at educating students from different age groups. The size of the corpus varies for each language because not all materials are available in the same set of languages. We have also obtained the QCRI Educational Domain Corpus created by the Qatar Computation Research Institute (Abdelali et al., 2014) . The corpus consists mostly of educational lectures from the Khan Academy and Udacity educational organisations, but there is also some content from selected Coursera courses. The lectures have been collaboratively transcribed and translated with the AMARA web-based platform 9 . Therefore, this is also a crowdsourced corpus. The data have been cleaned from incomplete subtitles, as well as subtitles which were in a wrong language. Other than that, no further steps for quality assurance have been taken. The corpus contains 20 languages, including 9 TraMOOC languages. There is no parallel data for Croatian and Greek in the corpus. Last but not least, we also make use of in-domain parallel data available only for some of the eleven target languages. For example, for German we have obtained parallel data produced within the EU-funded transLectures project (FP7 ICT project #287755) 10 . The data includes transcripts of selected courses available from VideoLectures.NET 11 which were translated by professional translators. \nThe lectures include topics ranging from Computer Science, Technology, Mathematics, Physics, Chemistry and Biology to Business, Social Science and Arts. Although the size of the data is not that large (around 300,000 words), such high-quality parallel data can be very useful for the tuning of the MT models. Table 1 provides an overview of the size of parallel in-domain data collected so far for each of the eleven TraMOOC languages. The size is given in millions of English words. Creating parallel corpora via crowdsourcing is another way for obtaining in-domain data which we are pursuing in TraMOOC. We aim at annotating 1 million words per language pair. Due to the use of filtering techniques, like the selection of the best choice among redundant translations, or the automatic detection of errors, in order to ensure the quality of the crowdsourced data, the size of the usable in-domain parallel corpora is expected to be between 800,000 and 850,000 words per language pair. The texts will be carefully selected from subtitles of MOOC courses, course assignments, slides, and other course materials. The forum data of TraMOOC's industrial partner, Iversity, are also included since student forums will also be automatically translated for the purposes of implicit translation evaluation.",
|
"cite_spans": [ |
|
{ |
|
"start": 4745, |
|
"end": 4768, |
|
"text": "(Abdelali et al., 2014)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 6110, |
|
"end": 6117, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Description of the Data", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "Size ( ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language pair", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Crowdsourcing has been employed extensively for the implementation of human intelligence natural language processing (NLP) tasks in recent years (Callison, 2009; Zaidan & Burch, 2011; Zbib et al., 2012; Ambati, 2012; Finin et al., 2010; Hsueh et al., 2009) . TraMOOC involves crowdsourcing for realizing sub-goals that require human intervention in order to meet its high-quality output standards against upcoming challenges, including the large number of targeted languages, the fragmentary or weak SMT infrastructure support for the majority of the languages, and the multiple domains and text genres involved. The CrowdFlower 14 platform was chosen for the implementation of the crowdsourcing activities because of (a) its configurability, (b) its robust infrastructure, (c) its densely populated crowd channels and the evaluation and ranking process they undergo, (d) its convenient payment options, and (e) its high reception and popularity level in the microtasking field. The targeted crowds consist of (a) translation experts, (b) an internal group of workers with a known background in linguistics and/or translation, and (c) a group of external contributors from the platform's crowd channels. For the latter crowd category, apart from the standard channel evaluation processes applied by the platform to isolate spammers and contributors with poor language skills, further quality assurance measures are taken like - access control using quiz data that are far from straightforward to address and - the assignment of each row (for a percentage of the total data rows) to more than one contributors (redundancy). A separate crowdsourcing task is set up for every language pair and for every NLP activity type. Approximately a total of 2.2 M rows (segments) will be processed. The cost of each activity type varies, depending on its complexity, and is in alignment to the costs reported for similar tasks in the literature. \nA microtask in CrowdFlower requires the configuration of several parameter settings that pertain to the number of rows to be tackled in one page, the accepted error rate per page, the maximum number of judgments per contributor, etc. To optimize the configuration a series of trial set-ups have been run before the main tasks, where the participants' comments were recorded and taken into account. TraMOOC focuses on three types of NLP activities, namely human translation, evaluation of MT output, and text annotation.",
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 161, |
|
"text": "(Callison, 2009;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 183, |
|
"text": "Zaidan & Burch, 2011;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 202, |
|
"text": "Zbib et al., 2012;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 216, |
|
"text": "Ambati, 2012;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 236, |
|
"text": "Finin et al., 2010;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 256, |
|
"text": "Hsueh et al., 2009)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourcing", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "Human translation focuses on the development of in-domain (educational) and in-genre parallel data for training the translation engine, in particular for language pairs that are not adequately equipped with parallel data. This task will be available to internal and external contributors. Each contributor has to translate a set of ten segments in order to complete, submit and get paid for a job. A maximum number of 600 segments have been assigned per contributor. The goal for this task is for the number of segments to be translated per language to exceed 100,000. The cost per segment has been set at 0.04\u20ac.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The evaluation task includes several distinct sub-activities, which will form four different crowdsourcing tasks either independently or combined:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "1. Likert scale adequacy/fluency marking and post-editing. This task will be opened to internal and external contributors, and will involve approximately 75,000 segments per language pair. The cost per segment has been set at 0.02\u20ac. 2. This task includes Task 1 plus error type mark up.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Error types will include inflectional morphology, word order, omission, addition and mistranslation. This task will target approximately 15,000 segments per language, and will be mainly carried out by internal contributors and experts. The segment cost has been set at 0.05\u20ac.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "model comparison for two language pairs, EN-DE and EN-CZ, for 1,000 segments per pair. This task will be mainly carried out by experts. Segment cost has been set at 0.05\u20ac. 4. Ranking multiple translations of a given segment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error taxonomy-based evaluation for translation", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "Redundant translations provided in the translation task will be used in this evaluation task. External and internal contributors will be asked to rank the provided translation in decreasing quality. This task will target around 8,000 segments per language pair. Segment cost has been set at 0.02\u20ac. Experimentation with various crowd types and comparative testing between different task complexity levels aims at investigating the usability, the usefulness and the efficiency of sophisticated human evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error taxonomy-based evaluation for translation", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "Text annotation involves two different crowdsourcing tasks: entity annotation and sentiment annotation. The former will be applied to 1,000 segments per language pair, for all eleven pairs. Each segment will be annotated three times by three distinct contributors. The annotation process includes the markup of a potential single-or multi-word entity in the source segment, the linking with its Wikipedia URL (if available), and then the parallel process in the target segment. The segment cost has been set at 0.05\u20ac. Sentiment annotation will be applied to English segments only, taken from the MOOC forum students' text. Contributors will identify whether a given segment contains a positive, neutral or negative opinion regarding the machine-translated course content, produced by the TraMOOC translation engines. The cost for each segment annotation is 0.01\u20ac. The aforementioned annotations are used for training and/or testing the entity recognition and the sentiment analysis tools; the output of these tools facilitates the implicit evaluation setup described in detail in the next section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Explicit evaluation involves automatic and human evaluation of the translation output. In particular, n-gram similarity-based metrics, e.g. BLEU or NIST (Papineni et al., 2002) , or word-editing based metrics, e.g. TER 15 , are used for estimating the accuracy of the translated text. Diagnostic evaluation is performed focused on specific linguistic phenomena and error types. Comparative analysis of the results is performed across translation models, across language models, across languages, and across text types. Human evaluation is performed via crowdsourcing, as described earlier, and involves domain experts, translation experts and non-experts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 176, |
|
"text": "BLEU or NIST (Papineni et al., 2002)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explicit Evaluation", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "The comparative analysis of translation models will comprise automatic and human evaluation of syntax-based SMT, phrase-based SMT, and Neural MT, in the English-German and English-Czech language pairs. This stage will also provide a valuable linguistic checkpoint for ST issues. Thereafter, human evaluation by experts and non-experts will rate quality, highlight commonly occurring errors in MT output, and provide edited TT segments for retraining the MT engines in order to improve quality and domain specificity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explicit Evaluation", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Implicit MT evaluation aims to judge the MT quality between the ST and automatically generated TT without using a manually created reference translation. In TraMOOC, topic identification and sentiment analysis will be used for this task. Topic identification is performed via wikification (Mihalcea, 2007) . Sentiment analysis extracts the opinion of end-users regarding the TT by applying opinion mining techniques to user contributions posted on the MOOC forum. For the implicit MT evaluation we focus on topical information elements (named entities, events, specific terms) in source and target documents. Topic identification can be done in several ways such as computing word weights (Wartena et al., 2010) , using the document structure to find the main topics (Hearst, 1995) , or applying an (un)supervised topic modeling technique such as LDA (Blei et al., 2003) . In TraMOOC we make use of the fact that most Wikipedia pages have translations in many other languages, and use wikification for implicit MT evaluation. Using name translation as a measure for overall MT quality has been suggested before and has been shown to correlate well with human MT judgments (Hirschman et al., 2000) . With wikification we aim to generalize this technique. Such a wikification system detects both named entities and terms (topics) in a document and links them to their corresponding Wikipedia page. We apply a Wikifier (Ratinov & Roth, 2011) to find and link the topics in the English source data to their relevant Wikipedia pages. Next, we use the alignment between the source and target sentence to get the corresponding translation of the topics in the TT. We check whether this translated topic corresponds to the same Wikipedia page in the ST. When such a match is found, we count this as a correct topic translation, and when no matching page is found we count it as an error. \nThe transformation of the Wikification results into a reliable implicit evaluation MT score is a crucial research question that we will pursue in the course of the project. We create a reference set for the tuning and testing of the Wikifier and the development of the implicit score metric. For this reference set we collected 1000 sentences from MOOC courses in the eleven languages. These sentences are manually annotated with Wikipedia links via the Crowdsourcing platform. Each sentence is annotated by three different annotators and only annotations supported by at last two annotators are kept in the final reference set. This set will also give us an indication of the limits of the wikification method, such as its dependency on coverage of topics per language. Greek, for example, only has around 115,000 16 Wikipedia pages available and many detected topics in the English will not have corresponding Greek Wikipedia pages. Therefore we expect a lower coverage of the implicit evaluation method for low-resource languages as can be illustrated in the following example. In Example 1 we show a sentence taken from the Iversity MOOC course on Critical Thinking. Examples 2 and 3 show the automatic translation of this sentence in Portuguese and Greek produced by the prototype-1 TraMOOC MT system. In both cases the translation of the name is only partly correct. In Portuguese the correct translation should have been 'Trilema de M\u00fcnchhausen' or the synonym 'Trilema de Agripa' that both point to the same existing Wikipedia page. Due to the incorrect translation, the implicit MT evaluation will count this as a translation error. For Greek, no equivalent Wikipedia page exists and the translation quality of '\u039f \u0391\u03b3\u03c1\u03af\u03c0\u03c0\u03b1\u03c2 \u03c4\u03bf\u03c3 M\u03c3\u03b3\u03c4\u03ac\u03bf\u03c3\u03c3\u03b5\u03bd' cannot be verified.",
|
"cite_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 305, |
|
"text": "(Mihalcea, 2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 711, |
|
"text": "(Wartena et al., 2010)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 781, |
|
"text": "(Hearst, 1995)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 851, |
|
"end": 870, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1172, |
|
"end": 1196, |
|
"text": "(Hirschman et al., 2000)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2695, |
|
"end": 2697, |
|
"text": "16", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "(1) Agrippa's Trilemma states that there are three options if we try to prove any truth .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "(2) PT: Agrippa Munchausen afirma que existem tr\u00eas op\u00e7\u00f5es se tentarmos provar qualquer verdade.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "(3) EL: \u039f \u0391\u03b3\u03c0\u03af\u03c0\u03c0\u03b1\u03c1 \u03b7\u03bf\u03c2 M\u03c2\u03b3\u03c3\u03ac\u03bf\u03c2\u03b6\u03b5\u03bd \u03b1\u03bd\u03b1\u03b8\u03ad\u03c0\u03b5\u03b9 \u03cc\u03b7\u03b9 \u03c2\u03c0\u03ac\u03c0\u03c3\u03bf\u03c2\u03bd \u03b7\u03c0\u03b5\u03b9\u03c1 \u03b5\u03c0\u03b9\u03bb\u03bf\u03b3\u03ad\u03c1 \u03b1\u03bd \u03c0\u03c0\u03bf\u03b6\u03c0\u03b1\u03b8\u03ae\u03b6\u03bf\u03c2\u03bc\u03b5 \u03bd\u03b1 \u03b1\u03c0\u03bf\u03b4\u03b5\u03af\u03be\u03b5\u03b9 \u03ba\u03ac\u03c0\u03bf\u03b9\u03b1 \u03b1\u03bb\u03ae\u03b8\u03b5\u03b9\u03b1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "The technology developed in the TraMOOC project is applied to two different use-cases: the European MOOC platform, Iversity, and the VideoLectures.NET digital video lecture library. Iversity is a Berlin-based MOOC provider which launched its first MOOCs in 2013 and has grown quickly, recently reaching a cumulative 500,000 users with over 700,000 course enrollments. There are now ~50 courses from a few dozen European universities, with most users in Europe, but also from all parts of the world 17 . Courses cover areas ranging from Design, Engineering, and Computer Science to Education, Philosophy, and Life Sciences. The language of the vast majority of courses provided is English, and courses are mostly held via video lectures, though some have additional textual material, such as slides. All courses are accompanied by a forum platform that allows students and teachers to communicate. The translation prototypes generated in TraMOOC will be integrated into the Iversity platform according to the end-user requirements. VideoLectures.NET, administered by the Knowledge 4 All Foundation Ltd. and run by the dedicated Center for Transfer in Information Technologies at the Josef Stefan Institute (JSI) in Ljubljana, was founded in 2001. It functions as a free, online video library, established with the aim of promoting access to academic lectures given by distinguished scholars, scientists, researchers and academics from many scientific fields, at conferences, summer schools, workshops, and university classrooms. Lecture subtitles translated via the TraMOOC translation prototypes will be accessible through the video library.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Use Cases", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "http://www.statmt.org/moses/ 5 https://www.coursera.org/ 6 Coursera has provided its consent and has given the TraMOOC consortium access to this data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.transifex.com 8 http://europa.eu/teachers-corner/recommended-material/index _en.htm 9 https://amara.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.translectures.eu/web/project-summary/ 11 http://videolectures.net/ 12 http://www.statmt.org/wmt15/translation-task.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://dumps.wikimedia.org (April 2015) 14 www.crowdflower.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.cs.umd.edu/~snover/tercom/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "verified at 23-02-2016 17 https://www.class-central.com/report/iversity-european-mooc s/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The research leading to these results has received funding from the European Union's Horizon 2020 research and innovation programme under Grant Agreement No 644333, as well as from the ADAPT Centre, Dublin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "8." |
|
}, |
|
{ |
|
"text": "for Natural Language Processing (EMNLP). Honolulu, USA, pp. 847-855. Hearst, M. A. (1995). Tilebars: visualization of term distribution information in full text information access. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proceedings of the Conference on Empirical Methods", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The AMARA Corpus: Building parallel language resources for the educational domain", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Abdelali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Guzman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 9th International Conference on Language Resources and Evaluation (LREC'14)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1856--1862", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdelali, A., Guzman, F., Sajjad, H., and Vogel, S. (2014). The AMARA Corpus: Building parallel language resources for the educational domain. In Proceedings of the 9th International Conference on Language Resources and Evaluation (LREC'14). Reykjavik, Iceland, pp. 1856-1862.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Active learning and crowd-sourcing for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Ambati", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "978--979", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ambati, V. (2012). Active learning and crowd-sourcing for machine translation. PhD Thesis. Carnegie Mellon University. ISBN: 978-1-267-58215-7.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Latent Dirichlet Allocation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "", |
|
"issue": "3", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Blei D. M., Ng, A. Y. and Jordan, M. I. (2003). Latent Dirichlet Allocation. Journal of Machine Learning Research, (3), pp. 993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Fast, cheap, and creative: evaluating translation quality using Amazon's Mechanical Turk", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "286--295", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Callison-Burch, C. (2009). Fast, cheap, and creative: evaluating translation quality using Amazon's Mechanical Turk. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 1. Association for Computational Linguistics, pp. 286-295.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "WIT3: Web Inventory of Transcribed and Translated Talks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Girardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Annual Conference of the European Association for Machine Translation (EAMT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "261--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cettolo, M., Girardi, C. and Federico, M. (2012). WIT3: Web Inventory of Transcribed and Translated Talks. In Proceedings of the Annual Conference of the European Association for Machine Translation (EAMT). Trento, Italy, pp. 261-268.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Batch Tuning Strategies for Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Human Language Technology Conference/North American Chapter of the Association for Computational Linguistics (HLT-NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "427--436", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cherry, C. and Foster, G. (2012). Batch Tuning Strategies for Statistical Machine Translation. In Proceedings of the Human Language Technology Conference/North American Chapter of the Association for Computational Linguistics (HLT-NAACL). Montr\u00e9al, Canada, pp. 427-436.",
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Model With Minimal Translation Units, But Decode With Phrases", |
|
"authors": [ |
|
{ |
|
"first": "N",
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Human Language Technology Conference North American Chapter of the Association for Computational Linguistics (HLT-NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Durrani, N., Fraser, A. and Schmid, H. (2013). Model With Minimal Translation Units, But Decode With Phrases. In Proceedings of the Human Language Technology Conference North American Chapter of the Association for Computational Linguistics (HLT-NAACL), Atlanta, GA, USA, pp. 1-11.",
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Annotating named entities in Twitter data with crowdsourcing", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Finin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Murnane", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Karandikar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Keller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Martineau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "80--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Finin, T. Murnane, W., Karandikar, A., Keller, N. and Martineau, J. (2010). Annotating named entities in Twitter data with crowdsourcing. In Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk. Association for Computational Linguistics, pp. 80-88.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A Simple and Effective Hierarchical Phrase Reordering Model", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Galley, M., Manning, C. D. (2008). A Simple and Effective Hierarchical Phrase Reordering Model. In", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Keyword extraction using word co-occurrence", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Wartena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Brussee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Slakhorst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Workshop on Database and Expert Systems Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--58", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/DEXA.2010.32" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wartena, C., Brussee, R. and Slakhorst, W. (2010). Keyword extraction using word co-occurrence. In Proceedings of the 23rd International Workshop on Database and Expert Systems Applications. Bilbao, Spain, pp. 54-58. doi:10.1109/DEXA.2010.32", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Crowdsourcing translation: Professional quality from non-professionals", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Zaidan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1220--1229", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zaidan, O. F. and Callison-Burch, C. (2011). Crowdsourcing translation: Professional quality from non-professionals. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies-Volume 1. Association for Computational Linguistics, pp. 1220-1229.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Machine translation of Arabic dialects", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Zbib", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Malchiodi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Stallard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Matsoukas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Zaidan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--59", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zbib, R., Malchiodi, E., Devlin, J., Stallard, D., Matsoukas, S., Schwartz, R., Makhoul, J., Zaidan, O. F. and Callison-Burch, C. (2012). Machine translation of Arabic dialects. In Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Association for Computational Linguistics, pp. 49-59.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>System</td><td>TraMOOC</td><td>TraMOOC</td></tr><tr><td/><td/><td>Dev</td><td>Test</td></tr><tr><td/><td>EN-EL Tuned Mixed</td><td>25.5</td><td>28.0</td></tr><tr><td/><td>Tuned TraMOOC Dev</td><td>27.9</td><td>28.5</td></tr><tr><td/><td>EN-PT Tuned Mixed</td><td>34.1</td><td>27.9</td></tr><tr><td/><td>Tuned TraMOOC Dev</td><td>36.5</td><td>29.1</td></tr><tr><td/><td>EN-IT Tuned Mixed</td><td>34.1</td><td>32.6</td></tr><tr><td/><td>Tuned TraMOOC Dev</td><td>35.9</td><td>33.0</td></tr><tr><td/><td>million words)</td><td/></tr><tr><td>EN-DE</td><td>2.7</td><td/></tr><tr><td>EN-BG</td><td>1.5</td><td/></tr><tr><td>EN-PT</td><td>4.8</td><td/></tr><tr><td>EN-EL</td><td>2.4</td><td/></tr><tr><td>EN-NL</td><td>1.3</td><td/></tr><tr><td>EN-CZ</td><td>1.5</td><td/></tr><tr><td>EN-RU</td><td>1.4</td><td/></tr><tr><td>EN-CR</td><td>0.2</td><td/></tr><tr><td>EN-PL</td><td>1.7</td><td/></tr><tr><td>EN-IT</td><td>2.3</td><td/></tr><tr><td>EN-ZH</td><td>8.7</td><td/></tr><tr><td colspan=\"2\">Table 1: Size of parallel data for all language pairs</td><td/></tr><tr><td colspan=\"2\">4. Initial Translation Results</td><td/></tr></table>", |
|
"num": null, |
|
"text": "Tyers and Alperen, 2010). These were supplemented with monolingual Wikipedia corpora 13 for all three target languages. The phrase-based models include many features which make them strong baselines. These models include standard features plus a hierarchical lexicalised reordering model(Galley & Manning, 2008), a 5-gram operation sequence model(Durrani et al., 2013), binary features indicating absolute occurrence count classes of phrase pairs, sparse phrase length features, and sparse lexical features for the top-200 words. The models were optimised to maximise BLEU(Papineni et al., 2002) with batch MIRA(Cherry & Foster, 2012) on 1000-best lists. InTable 1we compare the BLEU score performance of the systems on the TraMOOC test sets, when tuned on a mixed domain tuning set, or with the TraMOOC tuning set. The mixed tuning set includes tuning sets from TED, Europarl, and News to result in the highest possible general performance system(Huck et al., 2015). As expected, however, it is outperformed by using the domain-specific test set." |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "BLEU scores for the initial translation prototypes" |
|
} |
|
} |
|
} |
|
} |