|
{ |
|
"paper_id": "1992", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:46:21.505214Z" |
|
}, |
|
"title": "An Example-Based Method for Transfer-Driven Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Osamu", |
|
"middle": [], |
|
"last": "Furuse", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "ATR Interpreting Telephony Research Laboratories", |
|
"institution": "", |
|
"location": { |
|
"addrLine": "2-2, Seika-cho, Souraku-gun", |
|
"postCode": "619-02", |
|
"settlement": "Hikaridai, Kyoto", |
|
"country": "JAPAN" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hitoshi", |
|
"middle": [], |
|
"last": "Iida", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "ATR Interpreting Telephony Research Laboratories", |
|
"institution": "", |
|
"location": { |
|
"addrLine": "2-2, Seika-cho, Souraku-gun", |
|
"postCode": "619-02", |
|
"settlement": "Hikaridai, Kyoto", |
|
"country": "JAPAN" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents a method called Transfer-Driven Machine Translation (TDMT), which utilizes an example-based framework for various process and combines multi-level knowledge. An example-based framework can achieve quick processing and consistently describe knowledge. It is useful for spoken-language translation, which needs robust and efficient translation. TDMT strengthens the example-based framework by integrating it with other frameworks. The feasibility of TDMT and the advantages of the example-based framework have been confirmed with a prototype system, which translates spoken dialog sentences from Japanese to English.", |
|
"pdf_parse": { |
|
"paper_id": "1992", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents a method called Transfer-Driven Machine Translation (TDMT), which utilizes an example-based framework for various process and combines multi-level knowledge. An example-based framework can achieve quick processing and consistently describe knowledge. It is useful for spoken-language translation, which needs robust and efficient translation. TDMT strengthens the example-based framework by integrating it with other frameworks. The feasibility of TDMT and the advantages of the example-based framework have been confirmed with a prototype system, which translates spoken dialog sentences from Japanese to English.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "There are various strategies and various levels of knowledge for translation. When translating a simple sentence, the translated result can be produced quickly using only surface-level knowledge, When translating a complex sentence, a more elaborate process is performed using syntactic, semantic.and contextual knowledge. Complicated abstract rules which are used in conventional systems is not always appropriate for translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "An example-based framework is useful for spoken-language translation, because it can achieve quick processing and consistently describe knowledge. The example-based approach for machine translation was advocated by Nagao [Nagao 84 ]. The essence of this approach is (a) retrieval of similar examples from a bilingual database and (b) calculating distances between the examples and the input to translate it. Recently, some research following this line, including Example-Based MT [Sumita and Iida 91] , Memory-Based MT [Sato and Nagao 90] , and Analogy-Based MT [Sadler 89 ], has emerged. This paper presents a method called Transfer-Driven Machine Translation (TDMT), which utilizes an example-based framework for various process and combines multi-level knowledge.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 230, |
|
"text": "[Nagao 84", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 500, |
|
"text": "[Sumita and Iida 91]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 538, |
|
"text": "[Sato and Nagao 90]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 572, |
|
"text": "[Sadler 89", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Section 2 explains transfer in example-based framework. Section 3 explains the extension of the examplebased framework. Section 4 reports on the TDMT prototype system, and section 5 reports on the experimental result. Section 6 discusses the advantages and problems of example-based framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The explanations in the following sections use Japanese-to-English translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "An example-based framework is useful for consistently describing transfer knowledge. The essence of the example-based framework is distance calculation. This framework utilizes best-matching between the input and provided examples, and selects the most plausible target expression from many candidates. The distance is calculated quickly because of its simple mechanism. Through providing examples, most of the knowledge in TDMT can be described in the example-based framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Transfer", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "We adopt the distance calculation method of Example-Based Machine Translation (EBMT) [Sumita and Iida 91] . The distance between words is defined as the closeness of semantic attributes in a thesaurus. Words have certain thesaurus codes, which correspond to particular semantic attributes. The distance between the semantic attributes is determined according to the relationship of their positions in the hierarchy of the thesaurus, and varies between 0 and 1 (Fig. 1) . The distance between semantic attributes A and B is expressed as d (A, B) . Provided that the words X and Y have the semantic attribute A and B, respectively, the distance between X and Y, d(X, Y), is equal to d(A, B).", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 105, |
|
"text": "[Sumita and Iida 91]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 544, |
|
"text": "(A, B)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 460, |
|
"end": 468, |
|
"text": "(Fig. 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Distance between words", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The hierarchy of the thesaurus that we use is in accordance with the thesaurus of everyday Japanese [Ohno and Hamanishi 84] , and consists of four layers, when two values can be abstracted in the k-th layer from the bottom, the distance k/3 (0 \u2264, k \u2264 3) is assigned. The value 0 means that two codes belong to exactly the same category, and 1 means that they are unrelated. The attributes \"writing\" and \"book\" are abstracted by the immediate upper attribute \"document\" and the distance is given as 1/3. Thus, the word \"ronbun{technical paper}\" which has thesaurus code \"writing\", and \"yokoushuu{proceedings}\" which has the thesaurus code \"book\", are assigned a distance of 1/3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 123, |
|
"text": "[Ohno and Hamanishi 84]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 1 Distance between Thesaurus codes", |
|
"sec_num": null |
|
}, |
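
{

"text": "A minimal Python sketch of this layered distance follows; the fixed three-level code tuples and the top label 'expression' are illustrative assumptions, not taken from the thesaurus itself.\n\ndef thesaurus_distance(code_a, code_b):\n    # code_a, code_b: 3-tuples of category labels, from the top layer down.\n    # k = number of layers, counted from the bottom, that must be climbed\n    # before the two codes meet: 0 (identical) ... 3 (unrelated).\n    common = 0\n    for a, b in zip(code_a, code_b):\n        if a != b:\n            break\n        common += 1\n    return (3 - common) / 3\n\n# 'writing' and 'book' share the upper attribute 'document', so the distance\n# between 'ronbun' and 'yokoushuu' is 1/3\nprint(thesaurus_distance(('expression', 'document', 'writing'),\n                         ('expression', 'document', 'book')))  # 0.333...",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Figure 1 Distance between Thesaurus codes",

"sec_num": null

},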
|
{ |
|
"text": "Transfer knowledge describes the correspondence between source language expressions (SE) and target language expressions (TE) in certain meaningful units, preserving the translational equivalence [Tsujii and Fujita 91] . The condition under which a TE is chosen as a translation result of an SE is associated with the TE. Transfer knowledge in an example-based framework is described as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 218, |
|
"text": "[Tsujii and Fujita 91]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distance calculation in transfer knowledge", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "SE => TE1 (E11.E12,...), : : TEn (Enl, En2,...)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distance calculation in transfer knowledge", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Each TE has several examples as conditions. Eij means the j-th example of TEi. The input is the SE's environment, and the most appropriate TE is selected according to the calculated distance between the input and the examples. The input and examples comprise a set of words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distance calculation in transfer knowledge", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Let us suppose that an input I and each example Eij consist of t elements as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distance calculation in transfer knowledge", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "I = ( I 1 .. It) Eij = (Eijl,...,Eijt)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distance calculation in transfer knowledge", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Then the distance between I and Eij is calculated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distance calculation in transfer knowledge", |
|
"sec_num": "2.2" |
|
}, |
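
{

"text": "Based on the definitions above and on the EBMT formulation of [Sumita and Iida 91], the distance is presumably the weighted sum of the element distances, d(I, Eij) = \\sum_{k=1}^{t} W_k \\cdot d(Ik, Eijk), where the attribute weights Wk are defined below.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Distance calculation in transfer knowledge",

"sec_num": "2.2"

},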
|
{ |
|
"text": "The attribute weight Wk expresses the importance of the k-th element in the translation. Wk is given for each Ik by TE's distribution that semantic attribute of Ik brings [Sumita and Iida 91] . The distance from the input is calculated for all examples. Then the example whose distance to the input is least, is detected and the TE which has that example is selected. When Eij is closest to I, TEi is selected as the most plausible TE. The enrichment of examples increases the accuracy of determining the TE because conditions become more detailed. Further, even if there is only one TE, but there is no example close to the input, the application of the transfer knowledge is rejected.", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 191, |
|
"text": "[Sumita and Iida 91]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distance calculation in transfer knowledge", |
|
"sec_num": "2.2" |
|
}, |
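
{

"text": "The selection procedure can be sketched as follows; the dictionary-of-lists representation of transfer knowledge, the word_distance argument, and the rejection threshold are illustrative assumptions, not the prototype's actual interface.\n\ndef select_te(input_words, transfer_knowledge, word_distance, weights, reject_threshold=1.0):\n    # transfer_knowledge: mapping TE -> list of example tuples (Ei1, Ei2, ...)\n    # weighted distance between the input tuple and one example tuple\n    def dist(example):\n        return sum(w * word_distance(i, e)\n                   for i, e, w in zip(input_words, example, weights))\n\n    best_te, best_d = None, float('inf')\n    for te, examples in transfer_knowledge.items():\n        for example in examples:\n            d = dist(example)\n            if d < best_d:\n                best_te, best_d = te, d\n    # even a lone TE is rejected when no example is close enough to the input\n    if best_d > reject_threshold:\n        return None, best_d\n    return best_te, best_d\n\n# toy run for the pattern 'X o o-negaishimasu' of section 2.3.2, with a toy\n# word distance standing in for the thesaurus distance\nknowledge = {\"may I speak to X'\": [('jimukyoku',)], \"please give me X'\": [('bangou',)]}\nwd = lambda a, b: 0.0 if (a, b) == ('jinjika', 'jimukyoku') else 1.0\nprint(select_te(('jinjika',), knowledge, wd, weights=(1.0,)))  # (\"may I speak to X'\", 0.0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Distance calculation in transfer knowledge",

"sec_num": "2.2"

},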
|
{ |
|
"text": "Distance calculation is used to determine which TE has the example that is closest to the input, and can be used in various abstract level expressions depending on how the input words are provided.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transfer knowledge description", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "By the wide application of distance calculation, various levels of knowledge can be provided. TDMT achieves efficient translation by utilizing multi-level knowledge effectively. Pattern-and grammar-level knowledge can be described easily in the example-based framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transfer knowledge description", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Since this kind of knowledge has a condition outside the SE, the cooperation with such as context module is sometimes necessary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "String-level transfer knowledge", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "In some cases, the conditions can be described by the examples of the most closely related word in which the SE is used, as follows: 1 sochira => this ((desu {be} 2 )...), you ((okuru {send})..), it ((miru {see})...)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "String-level transfer knowledge", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Applying this knowledge,\"you\" is selected as the word corresponding to the \"sochira\" in \"sochira ni{particle} tsutaeru\" because of the small distance between \"tsutaeru{convey}\" and \"okuru{send}\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "String-level transfer knowledge", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Pattern-level transfer knowledge involves variables, and the words which are substituted for the variables serve as input and examples for the distance calculation. For example, sentences (1) and (2) have the pattern,\"X o o-negaishimasu\":", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern-level transfer knowledge", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "(1) \"jinjika {personnel section} o o-negaishimasu\" (2) \"daimei {title} o o-negaishimasu\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern-level transfer knowledge", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "To explain the translation of these sentences, it is assumed that the pattern, \"X o o-negaishimasu\" can be translated into either of two English sentences as described by transfer knowledge in the following way:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern-level transfer knowledge", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "1 At present, TDMT prototype system determines a target word with pattern-level transfer knowledge. In (1), the translation \"may I speak to X' \" is selected because of the close distance between \"jimukyoku\" and \"jinjika\" ; in (2), \"please give me X' \" is selected because of the close distance between \"bangou\" and \"daimei\". In this way, the following English translations for (1) and (2) are obtained:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern-level transfer knowledge", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "(1') \"may I speak to the personnel section\" (2') \"please give me the title\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern-level transfer knowledge", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "Grammar-level transfer knowledge is expressed in terms of grammatical categories. Sets of words which are concrete instances of each category, constitute examples. The following transfer knowledge involves sets of three common nouns (CNs):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grammar-level transfer knowledge", |
|
"sec_num": "2.3.3" |
|
}, |
|
{ |
|
"text": "CN1 CN2 CNS => CN3' of CN1'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grammar-level transfer knowledge", |
|
"sec_num": "2.3.3" |
|
}, |
|
|
{ |
|
"text": "This transfer knowledge allows the following translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grammar-level transfer knowledge", |
|
"sec_num": "2.3.3" |
|
}, |
|
{ |
|
"text": "kenkyukai kaisai kikan {workshop, opening, time} -> the time of the workshop happyou moshikomi youshi {presentation, application, form} -> the application form for presentation The above translations select \"CN3' of CN1' \" and \"CN2' CN3' for CN1' \" as the most plausible TE, respectively by the result of distance calculation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grammar-level transfer knowledge", |
|
"sec_num": "2.3.3" |
|
}, |
|
{ |
|
"text": "TDMT utilizes the example-based framework not only to determine a TE, but also to reduce possible structural ambiguity and to describe analysis knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extension of the Example-based Methodology", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "In the application of transfer knowledge to the input sentence, structural ambiguity may occur. In such cases, the most appropriate structure is selected on the basis of total distance. For example, when the pattern \"X no Y\" is applied to the sentence \"10000 yen no hoteru {hotel} no yoyaku {reservation}\", there are two possible structures:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(1) 10000 yen no (hoteru no yoyaku) (2) (10000 yen no hoteru) no yoyaku", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The pattern \"X no Y\" has various TEs, such as in the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "X no Y => Y' of X' (E11,E12, ... ), Y' for X' (E21,E22, ... ), Y' at X' (E31,E32, ... ), X' Y'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
|
{ |
|
"text": "The distances \"X no Y\" of (1) are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "10000 yen no (hoteru no yoyaku) => (10000 yen)' (hoteru no yoyaku)' (distance 0.5) hoteru no yoyaku => yoyaku' for hoteru' (distance 0.0)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The total distance for structure (1) is 0.5 and the translation based on structure (1) is (1').", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(1') 10000 yen reservation for the hotel On the other hand, the distances of (2)'s structure and the translation (2') are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(10000 yen no hoteru) no yoyaku => yoyaku' for (10000 yen no hoteru)' (distance 0.0) 10000 yen no hoteru => 10000 yen ' hoteru'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(distance 0.0) (2') reservation for the 10000 yen hotel", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The total distances for structure (1) and (2) are 0.5 and 0.0, respectively. Structure (1) has an unnatural relation between 10000 yen and yoyaku, which makes the total distance higher. Comparing the total distances, (2) is selected as the structure of \"10000 yen no hoteru no yoyaku \".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining structure by total distance", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For some structurally complex sentences, translations cannot be performed by applying only transfer knowledge. In such cases, analysis knowledge is also required. The analysis module applies analysis knowledge and supplies the resulting information to the transfer module, which then applies transfer knowledge on the basis of that information. The transfer and analysis processes operate autonomously but cooperatively to produce the translation result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The analysis described in this paper is not the understanding of structure and meaning on the basis of a parsing of the input sentence according to grammar rules, but rather the extraction of the information required to apply transfer knowledge and to produce the correct translation from the input sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Analysis knowledge is described by examples in the same way as transfer knowledge, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "SE => Revised SEl (E11,E12,... ), : : Revised SEn (Enl, En2,... )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Although the form of knowledge description is virtually the same, transfer knowledge descriptions map onto TEs, whereas analysis knowledge descriptions map onto revised SEs. The analysis knowledge that TDMT provides at present, is as follows, for normalization [Nagao 84 ] and for structuring", |
|
"cite_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 270, |
|
"text": "[Nagao 84", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Normalization is putting together minor colloquial expressions into standard expressions, It leads to robust translation and efficient knowledge storage. Analysis knowledge for normalization is utilized to recover the ellipsis of function words such as particles, and to normalize some variant forms such as sentence-final forms into normal forms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for normalization", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Analysis knowledge for normalization also has the advantage of making the scale of knowledge more economical and the translation processing more robust.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for normalization", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Structuring is recognizing the structure components by insertion of a marker in order to apply transfer knowledge to each structure component. Analysis knowledge for structuring is applied to detect special linguistic phenomena such as adnominal expressions, wh-expressions, and discontinuities, so as to assign a structure to the SE.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for structuring", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Analysis knowledge helps the application of transfer knowledge to the input sentence. The sentence \"watakushi wa Suzuki desu {I, particle, Suzuki, complementizer}\" is translated into \"I am Suzuki\" by applying transfer knowledge such as the following: X wa Y desu => X' be Y' However, in spoken Japanese, particles are frequently omitted. The sentence \"watakushi Suzuki desu\" is natural spoken-Japanese. It is normalized to \"watakushi wa Suzuki desu\", which has the omitted particle \"wa\" recovered, by applying the following analysis knowledge:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for structuring", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Pronoun Proper-Noun => Pronoun wa Proper-Noun (a set of examples)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for structuring", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The analysis module sends the information about the application of the analysis knowledge to the transfer module. The transfer module receives the information and applies the transfer knowledge to produce the English sentence \"I am Suzuki\" By examples, this kind of analysis knowledge can also classify the particles to be recovered as shown below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for structuring", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "CN Verb => CN o Verb ((hoteru{hotel},yoyaku-suru{reserve}),...),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for structuring", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "This analysis knowledge allows the recovery of various particles such as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for structuring", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\"hoteru yoyaku-suru\" -> \"hoteru o yoyaku-suru\" \"kaigi sanka-suru\" -> \"kaigi ni sanka-suru\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Analysis knowledge for structuring", |
|
"sec_num": null |
|
}, |
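
{

"text": "A sketch of this example-based particle recovery follows; the dictionary of example pairs and the exact-match word distance are placeholders for the corpus-derived examples and the thesaurus distance of section 2.1.\n\nRECOVERY_KNOWLEDGE = {\n    'o': [('hoteru', 'yoyaku-suru')],    # CN Verb => CN o Verb\n    'ni': [('kaigi', 'sanka-suru')],     # CN Verb => CN ni Verb\n}\n\ndef recover_particle(cn, verb, word_distance):\n    def pair_distance(example):\n        return word_distance(cn, example[0]) + word_distance(verb, example[1])\n\n    # pick the particle whose closest example is nearest to the (CN, Verb) pair\n    best = min((min(pair_distance(e) for e in examples), particle)\n               for particle, examples in RECOVERY_KNOWLEDGE.items())\n    return cn + ' ' + best[1] + ' ' + verb\n\nwd = lambda a, b: 0.0 if a == b else 1.0\nprint(recover_particle('hoteru', 'yoyaku-suru', wd))  # hoteru o yoyaku-suru\nprint(recover_particle('kaigi', 'sanka-suru', wd))    # kaigi ni sanka-suru",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "\u2022 Analysis knowledge for structuring",

"sec_num": null

},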
|
{ |
|
"text": "Japanese and English have different constraints for generation. To produce correct English output, therefore, generation constraints are given to a TE part of transfer knowledge. For instance, while the case order in Japanese is relatively free, it is constrained in English. TDMT transfer knowledge about a case relation such as subject and object, is attached to a TE. Table 2 Causes of Incorrect Sentences (total number of incorrect sentences -25) occurrences (1) inability to get such TEs as elided objects 9 (2) selection of incorrect TEs 8 (3) error in adverb position 4 (4) incorrect declension 1 (5) incorrect tense 1 (6) etc 2", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 371, |
|
"end": 378, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Incorporation of generation information into transfer knowledge", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The second factor shows that an elaboration of distance calculation and an enrichment of examples are needed. The first, third, and fourth factors are caused by the shortage of generation knowledge. The fifth factor is caused by the shortage of analysis knowledge. These facts show that the cooperative control that flexibly communicates various kinds of knowledge including context and generation knowledge, and various kinds of framework such as a rule-based and a statistical framework are necessary to achieve efficient and robust machine translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transfer-Driven Machine Translation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "This section discusses the advantages and problems of the example-based framework, and the role of TDMT about the example-based framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "An example-based framework has the following advantages, especially for spoken-language translation, which demands efficient and robust processing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Advantages", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In spoken-language, there are many expressions which deviate from conventional grammar. An examplebased framework can validate them because of statistical evidence. Also, spoken-language processing systems sometimes have to handle imprecise or incorrect sentence recognized by speech devices. An example-based frameworks provide fail-safe translation [Slocum 84 ] because they utilize a best-matching mechanism rather than an exact-matching mechanism.", |
|
"cite_spans": [ |
|
{ |
|
"start": 351, |
|
"end": 361, |
|
"text": "[Slocum 84", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 robustness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "When the system produces an incorrect result, it can easily learn the correct translation by the addition of a new example.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 improvement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The distance is calculated quickly because it utilizes a simple mechanism. The system can therefore translate quickly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 efficiency", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A cooperating mechanism can be achieved, with a consistent example-based framework that places a wellbalanced load on each process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 load balance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There is fear that building a large-scale knowledge in a example-based framework will be too expensive. TDMT knowledge is built from a bilingual corpus. Unless the linguistic correspondences between two languages are supplied, the task of building knowledge becomes laborious, and the effectiveness of the knowledge cannot be guaranteed based on statistical evidence. Fortunately, our corpus meets this requirement, and various knowledge in TDMT can be built based on statistical investigation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problems", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "An example-based framework never contradicts other frameworks such as a rule-based and a statisticallybased framework, nor is it difficult to integrate it with them. TDMT does not process only by example-based framework, but enhances the advantage of an example-based framework in cooperation with other mechanisms. For instance, the incorporation of generation information into transfer knowledge makes a generation result correct, and in some cases the context module supplies the condition for determining a target expression. Thus, the example-based framework can be strengthened in TDMT by also utilizing a rule-based or statistical framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Role of TDMT", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We have discussed the use of an example-based framework in Transfer-Driven Machine Translation. By making the best use of the example-based framework, TDMT performs efficient and robust translation of spoken-language. The feasibility of TDMT and the effectiveness of the example-based framework has been confirmed with a prototype Japanese-to-English translation system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concluding Remarks", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "Future work will introduce a cooperative mechanism among the various kinds of framework, such as example-based, rule-based, and statistically-based into TDMT using a parallel computer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concluding Remarks", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "X' is the transferred expression of X", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "TDMT performs efficient and robust spoken-language translation using various kinds of strategies to be able to treat diverse input. It has the following characteristics:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "annex", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In TDMT, transfer knowledge consists of various levels of bilingual information. It is the primary knowledge used to solve translation problems. The transfer module retrieves the necessary transfer knowledge ranging from global unit like sentence structures to local unit like words, and utilizes analysis knowledge (syntactic/semantic information) which helps to apply transfer knowledge to the input Generation and context knowledge which are utilized to produce correct translations. Namely TDMT produces translation results by utilizing these kinds of knowledge cooperatively, centered on the transfer process, and achieves efficient translation according to the nature of the input", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022Transfer-centered cooperation mechanism", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Transfer knowledge is the basic data used for controlling the translation process. The distance is calculated quickly because of its simple mechanism. Most of the TDMT transfer knowledge is now described in the example-based framework. Through providing examples, TDMT describes various kinds and levels of knowledge and reduce possible structural ambiguities by total distance calculation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022Effective utilization of example-based framework", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "TDMT provides multi-level transfer knowledge, with each level corresponding to each translation strategy. In the transfer knowledge of the TDMT prototype system, there is string-, pattern-and grammar-level knowledge. TDMT achieves efficient translation by utilizing multi-level knowledge effectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022Multi-level knowledge", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A prototype Japanese to English system constructed to confirm the feasibility and effectiveness of TDMT is running on a Genera 8.1 LISP machine [Furuse and Iida 92] .Due to the restriction of the sequential mechanism, a method for driving the necessary process at the required time has not been completely achieved. However, the following control mechanism is used to obtain the most efficient processing possible.\u2022 As much as possible, translation is attempted by first applying only transfer knowledge; when this fails, the system tries to apply analysis knowledge.\u2022 Transfer knowledge is applied at the most concrete level as possible, that is, in the order of string, pattern, and grammar level.In order to achieve flexible processing which exchanges necessary translation information, a parallel implementation is under study based on the results from the prototype system.Appendix A shows the translation of \"Kaigi ni moushikomi tai no desu ga\" by only transfer knowledge. Appendix B shows the translation of \"Sanka-ryo wa dono youni o shiharai shi tara yoi no desu ka\" by transfer knowledge and analysis knowledge. To translate the latter sentence, it is necessary to detect a wh-and a modality-component by applying analysis knowledge.", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 164, |
|
"text": "[Furuse and Iida 92]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Control of translation", |
|
"sec_num": "4.2" |
|
}, |
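
{

"text": "The control order above can be sketched as follows; the function names and the None-on-failure convention are placeholders, not the prototype's actual interface.\n\nLEVELS = ('string', 'pattern', 'grammar')  # most concrete level first\n\ndef translate(sentence, apply_transfer, apply_analysis):\n    # 1. try transfer knowledge alone, from string level down to grammar level\n    for level in LEVELS:\n        result = apply_transfer(sentence, level)\n        if result is not None:\n            return result\n    # 2. when this fails, apply analysis knowledge and retry the transfer levels\n    revised = apply_analysis(sentence)\n    for level in LEVELS:\n        result = apply_transfer(revised, level)\n        if result is not None:\n            return result\n    return None",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Control of translation",

"sec_num": "4.2"

},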
|
{ |
|
"text": "The effectiveness of transfer knowledge and analysis knowledge is an important problem in TDMT. The authors are investigating a corpus of about 270,000 words in the domain of inquiries concerning international conferences for a prototype translation system. The frequencies of occurrence of these Japanese and English sentences in the corpus were determined and analyzed, and transfer knowledge and analysis knowledge to cover the domain are being compiled. The ATR corpus was constructed for the purpose of analyzing linguistic phenomena and gathering statistical information. It includes the results of morphological and syntactical analysis for each sentence, and the English equivalents [Ehara et al 90] .The top ten sentences cover 22% of the corpus, and the top ten sentence patterns cover 35% [Furuse et al. 90] . This can support the assumption that expressions are covered at a high rate by string-and pattern-level, and the effectiveness of these kinds of knowledge in translation.The system is presently utilizing the following types of data.\u2022 string-level transfer knowledge (about 500 items of knowledge) top 50 frequent sentences frequent compound nouns such as \"kaigi jimukyoku => conference office\" bilingual information of words such as \"eigo => English\" \u2022 pattern-level transfer knowledge (about 300 items) frequent sentence patterns \"A particle B\" patterns such as \"A no B\" \u2022 grammar-level transfer knowledge (about 20 items) continuation of nouns \u2022 analysis knowledge (about 50 items) normalization, adnominal sentence, wh-case, modality, adverb", |
|
"cite_spans": [ |
|
{ |
|
"start": 691, |
|
"end": 707, |
|
"text": "[Ehara et al 90]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 800, |
|
"end": 818, |
|
"text": "[Furuse et al. 90]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge base", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The prototype system was evaluated by translation of model conversations related to inquiries concerning international conferences. The model conversations consist of 10 dialogues which comprise 225 sentences. They cover most of the basic expressions of the domain, and were constructed independently of the ATR corpus, which provides the basis for the transfer knowledge and analysis knowledge.The transfer and analysis knowledge that is needed to translate the model conversations is shown in Table 1 . The table shows that string-and pattern-level transfer knowledge can cover a wide range of sentences, and efficient translation requires the selective use of concrete knowledge concerning surface forms and abstract knowledge concerning grammar. At present, the prototype system can produce output quickly by the example-based framework. 200 of the sentences are correct, providing a success rate of 88.9% for all the sentences. The coverage by string-and pattern-level knowledge is wider than expected. Table 2 shows the main causes of incorrect sentences.Appendix A Translation by only transfer knowledge Appendix B Translation by transfer knowledge and analysis knowledge", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 503, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1009, |
|
"end": 1016, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5." |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "ATR Dialogue Database", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ehara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Ogura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Morimoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Proc. of International Conference on Spoken Language Processing", |
|
"volume": "90", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "References [Ehara et al. 90] Ehara, T., Ogura, K., and Morimoto, T.: ATR Dialogue Database, Proc. of International Conference on Spoken Language Processing 90, (1990).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "H : A Method for Realizing Transfer-Driven Machine Translation, Reprint of WGNL 80-8", |
|
"authors": [], |
|
"year": 1990, |
|
"venue": "IPSJ", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "et al. 90] Furuse, O., Sumita, E., and Iida, H : A Method for Realizing Transfer-Driven Machine Translation, Reprint of WGNL 80-8, IPSJ, (1990), (in Japanese).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "A framework of a mechanical translation between Japanese and English by analogy principle", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nagao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1984, |
|
"venue": "Artificial and Human Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "173--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nagao, M.: A framework of a mechanical translation between Japanese and English by analogy principle, in Artificial and Human Intelligence, ed. Elithorn, A. and Banerji, R., North-Holland , pp.173-180, (1984).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Working with Analogical Semantics", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Sadler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "89] Sadler, V.: Working with Analogical Semantics, Foris Publications (1989).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Toward Memory-Based Translation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nagao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Proc. of Coling '90", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "and Nagao 90] Sato, S. and Nagao M. : Toward Memory-Based Translation, Proc. of Coling '90, (1990).", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Machine Translation today: the state of the art", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Slocum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1984, |
|
"venue": "Proc. of the third Lugano Tutorial", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "319--350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slocum, J. : METAL: the LRC Machine Translation system, in \"Machine Translation today: the state of the art\", Proc. of the third Lugano Tutorial, 2-7 (1984), M.King, ed., Edinburgh University Press, pp. 319-350, (1987).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Experiments and Prospects of Example-based Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Iida", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Proc. of the 29th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "and Iida 91] Sumita, E., and Iida, H. : Experiments and Prospects of Example-based Machine Translation, Proc. of the 29th Annual Meeting of the Association for Computational Linguistics, (1991).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Lexical Transfer based on Bi-Lingual Signs -Towards Interaction during Transfer", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Fujita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Proc. of the 5th Conference of the European Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "and Fujita 91] Tsujii, J. and Fujita, K. : Lexical Transfer based on Bi-Lingual Signs -Towards Interaction during Transfer. Proc. of the 5th Conference of the European Chapter of the Association for Computational Linguistics, (1991).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "2 {wl... wn) is the list of corresponding English words.X o o-negaishimasu => may I speak to X' 3 ((jimukyoku{office}),...), please give me X' ((bangou{number}),...)" |
|
} |
|
} |
|
} |
|
} |