|
{ |
|
"paper_id": "N13-1023", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:39:42.606601Z" |
|
}, |
|
"title": "Segmentation Strategies for Streaming Speech Translation", |
|
"authors": [ |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AT&T Labs -Research", |
|
"location": { |
|
"addrLine": "180 Park Avenue, Florham Park", |
|
"postCode": "07932", |
|
"region": "NJ" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AT&T Labs -Research", |
|
"location": { |
|
"addrLine": "180 Park Avenue, Florham Park", |
|
"postCode": "07932", |
|
"region": "NJ" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Srinivas", |
|
"middle": [], |
|
"last": "Bangalore", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AT&T Labs -Research", |
|
"location": { |
|
"addrLine": "180 Park Avenue, Florham Park", |
|
"postCode": "07932", |
|
"region": "NJ" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Ljolje", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AT&T Labs -Research", |
|
"location": { |
|
"addrLine": "180 Park Avenue, Florham Park", |
|
"postCode": "07932", |
|
"region": "NJ" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rathinavelu", |
|
"middle": [], |
|
"last": "Chengalvarayan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AT&T Labs -Research", |
|
"location": { |
|
"addrLine": "180 Park Avenue, Florham Park", |
|
"postCode": "07932", |
|
"region": "NJ" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The study presented in this work is a first effort at real-time speech translation of TED talks, a compendium of public talks with different speakers addressing a variety of topics. We address the goal of achieving a system that balances translation accuracy and latency. In order to improve ASR performance for our diverse data set, adaptation techniques such as constrained model adaptation and vocal tract length normalization are found to be useful. In order to improve machine translation (MT) performance, techniques that could be employed in real-time such as monotonic and partial translation retention are found to be of use. We also experiment with inserting text segmenters of various types between ASR and MT in a series of real-time translation experiments. Among other results, our experiments demonstrate that a good segmentation is useful, and a novel conjunction-based segmentation strategy improves translation quality nearly as much as other strategies such as comma-based segmentation. It was also found to be important to synchronize various pipeline components in order to minimize latency.", |
|
"pdf_parse": { |
|
"paper_id": "N13-1023", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The study presented in this work is a first effort at real-time speech translation of TED talks, a compendium of public talks with different speakers addressing a variety of topics. We address the goal of achieving a system that balances translation accuracy and latency. In order to improve ASR performance for our diverse data set, adaptation techniques such as constrained model adaptation and vocal tract length normalization are found to be useful. In order to improve machine translation (MT) performance, techniques that could be employed in real-time such as monotonic and partial translation retention are found to be of use. We also experiment with inserting text segmenters of various types between ASR and MT in a series of real-time translation experiments. Among other results, our experiments demonstrate that a good segmentation is useful, and a novel conjunction-based segmentation strategy improves translation quality nearly as much as other strategies such as comma-based segmentation. It was also found to be important to synchronize various pipeline components in order to minimize latency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The quality of automatic speech-to-text and speechto-speech (S2S) translation has improved so significantly over the last several decades that such systems are now widely deployed and used by an increasing number of consumers. Under the hood, the individual components such as automatic speech recognition (ASR), machine translation (MT) and text-tospeech synthesis (TTS) that constitute a S2S system are still loosely coupled and typically trained on disparate data and domains. Nevertheless, the models as well as the pipeline have been optimized in several ways to achieve tasks such as high quality offline speech translation (Cohen, 2007; Kingsbury et al., 2011; Federico et al., 2011) , on-demand web based speech and text translation, low-latency real-time translation (Wahlster, 2000; Hamon et al., 2009; Bangalore et al., 2012) , etc. The design of a S2S translation system is highly dependent on the nature of the audio stimuli. For example, talks, lectures and audio broadcasts are typically long and require appropriate segmentation strategies to chunk the input signal to ensure high quality translation. In contrast, single utterance translation in several consumer applications (apps) are typically short and can be processed without the need for additional chunking. Another key parameter in designing a S2S translation system for any task is latency. In offline scenarios where high latencies are permitted, several adaptation strategies (speaker, language model, translation model), denser data structures (Nbest lists, word sausages, lattices) and rescoring procedures can be utilized to improve the quality of end-to-end translation. On the other hand, realtime speech-to-text or speech-to-speech translation demand the best possible accuracy at low latencies such that communication is not hindered due to potential delay in processing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 630, |
|
"end": 643, |
|
"text": "(Cohen, 2007;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 667, |
|
"text": "Kingsbury et al., 2011;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 690, |
|
"text": "Federico et al., 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 776, |
|
"end": 792, |
|
"text": "(Wahlster, 2000;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 793, |
|
"end": 812, |
|
"text": "Hamon et al., 2009;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 836, |
|
"text": "Bangalore et al., 2012)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we focus on the speech translation of talks. We investigate the tradeoff between accuracy and latency for both offline and real-time translation of talks. In both these scenarios, appropriate segmentation of the audio signal as well as the ASR hypothesis that is fed into machine translation is critical for maximizing the overall translation quality of the talk. Ideally, one would like to train the models on entire talks. However, such corpora are not available in large amounts. Hence, it is necessary to con-form to appropriately sized segments that are similar to the sentence units used in training the language and translation models. We propose several nonlinguistic and linguistic segmentation strategies for the segmentation of text (reference or ASR hypotheses) for machine translation. We address the problem of latency in real-time translation as a function of the segmentation strategy; i.e., we ask the question \"what is the segmentation strategy that maximizes the number of segments while still maximizing translation accuracy?\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Speech translation of European Parliamentary speeches has been addressed as part of the TC-STAR project (Vilar et al., 2005; F\u00fcgen et al., 2006) . The project focused primarily on offline translation of speeches. Simultaneous translation of lectures and speeches has been addressed in (Hamon et al., 2009; . However, the work focused on a single speaker in a limited domain. Offline speech translation of TED 1 talks has been addressed through the IWSLT 2011 and 2012 evaluation tracks. The talks are from a variety of speakers with varying dialects and cover a range of topics. The study presented in this work is the first effort on real-time speech translation of TED talks. In comparison with previous work, we also present a systematic study of the accuracy versus latency tradeoff for both offline and real-time translation on the same dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 124, |
|
"text": "(Vilar et al., 2005;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 144, |
|
"text": "F\u00fcgen et al., 2006)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 305, |
|
"text": "(Hamon et al., 2009;", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Various utterance segmentation strategies for offline machine translation of text and ASR output have been presented in (Cettolo and Federico, 2006; Rao et al., 2007; Matusov et al., 2007) . The work in also examines the impact of segmentation on offline speech translation of talks. However, the realtime analysis in that work is presented only for speech recognition. In contrast with previous work, we tackle the latency issue in simultaneous translation of talks as a function of segmentation strategy and present some new linguistic and non-linguistic methodologies. We investigate the accuracy versus latency tradeoff across translation of reference text, utterance segmented speech recognition output and", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 148, |
|
"text": "(Cettolo and Federico, 2006;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 166, |
|
"text": "Rao et al., 2007;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 188, |
|
"text": "Matusov et al., 2007)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The basic problem of text translation can be formulated as follows. Given a source (French) sentence", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "f = f J 1 = f 1 , \u2022 \u2022 \u2022 , f J , we aim to translate it into target (English) sentence\u00ea =\u00ea I 1 =\u00ea 1 , \u2022 \u2022 \u2022 ,\u00ea I .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "e(f ) = arg max e Pr(e|f )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "If, as in talks, the source text (reference or ASR hypothesis) is very long, i.e., J is large, we attempt to break down the source string into shorter sequences,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "S = s 1 \u2022 \u2022 \u2022 s k \u2022 \u2022 \u2022 s Qs , where each sequence s k = [f j k f j k +1 \u2022 \u2022 \u2022 f j (k+1) \u22121 ], j 1 = 1, j Qs+1 = J + 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Let the translation of each foreign sequence s k be denoted by", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "t k = [e i k e i k +1 \u2022 \u2022 \u2022 e i (k+1) \u22121 ], i 1 = 1, i Qs+1 = I + 1 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The segmented sequences can be translated using a variety of techniques such as independent chunk-wise translation or chunk-wise translation conditioned on history as shown in Eqs. 2 and 3, respectively. In Eq. 3, t * i denotes the best translation for source sequence s i . e(f ) = arg max", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "t 1 Pr(t 1 |s 1 ) \u2022 \u2022 \u2022 arg max t k Pr(t k |s k ) (2) e(f ) = arg max t 1 Pr(t 1 |s 1 ) arg max t 2 Pr(t 2 |s 2 , s 1 , t * 1 ) \u2022 \u2022 \u2022 arg max t k Pr(t k |s 1 , \u2022 \u2022 \u2022 , s k , t * 1 , \u2022 \u2022 \u2022 , t * k\u22121 )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
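The two chunk-wise decoding regimes of Eqs. 2 and 3 can be sketched in a few lines of Python. This is an illustrative toy, not the authors' decoder: the translate callable and the toy dictionary below are hypothetical stand-ins for a real MT engine.

```python
# Sketch of Eq. 2 (independent chunk-wise translation) and Eq. 3
# (chunk-wise translation conditioned on history).

def translate_chunkwise(segments, translate, use_history=False):
    """segments: list of source word lists.
    translate(segment, src_history, tgt_history) -> best target word list."""
    src_history, tgt_history, output = [], [], []
    for s_k in segments:
        if use_history:
            t_k = translate(s_k, src_history, tgt_history)   # Eq. 3
        else:
            t_k = translate(s_k, [], [])                      # Eq. 2
        output.extend(t_k)
        src_history.extend(s_k)   # commit the segment and its best translation
        tgt_history.extend(t_k)
    return output

# Toy stand-in decoder: word-by-word dictionary lookup that ignores history.
toy = {"the": "el", "sea": "mar"}
print(translate_chunkwise([["the"], ["sea"]],
                          lambda s, sh, th: [toy.get(w, w) for w in s]))
```

With use_history=False each segment is translated independently (Eq. 2); with use_history=True the already-committed source segments and their best translations are passed along as context (Eq. 3).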
|
{ |
|
"text": "Typically, the hypothesis\u00ea will be more accurate than\u00ea for long texts as the models approximating Pr(e|f ) are conventionally trained on short text segments. In Eqs. 2 and 3, the number of sequences Q s is inversely proportional to the time it takes to generate partial target hypotheses. Our main focus in this work is to obtain a segmentation S such that the quality of translation is maximized with minimal latency. The above formulation for automatic speech recognition is very similar except that the foreign stringf ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "=f J 1 =f 1 , \u2022 \u2022 \u2022 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this work, we focus on the speech translation of TED talks, a compendium of public talks from several speakers covering a variety of topics. Over the past couple of years, the International Workshop on Spoken Language Translation (IWSLT) has been conducting the evaluation of speech translation on TED talks for English-French. We leverage the IWSLT TED campaign by using identical development (dev2010) and test data (tst2010). However, English-Spanish is our target language pair as our internal projects are cater mostly to this pair. As a result, we created parallel text for English-Spanish based on the reference English segments released as part of the evaluation (Cettolo et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 674, |
|
"end": 696, |
|
"text": "(Cettolo et al., 2012)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We also harvested the audio data from the TED website for building an acoustic model. A total of 1308 talks in English were downloaded, out of which we used 1119 talks recorded prior to December 2011. We split the stereo audio file and duplicated the data to account for any variations in the channels. The data for the language models was also restricted to that permitted in the IWSLT 2011 evaluation. The parallel text for building the English-Spanish translation model was obtained from several corpora: Europarl (Koehn, 2005) , JRC-Acquis corpus (Steinberger et al., 2006) , Opensubtitle corpus (Tiedemann and Lars Nygaard, 2004) , Web crawling (Rangarajan Sridhar et al., 2011) as well as human translation of proprietary data. Table 1 summarizes the data used in building the models. It is important to note that the IWSLT evaluation on TED talks is completely offline. In this work, we perform the first investigation into the real-time translation of these talks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 517, |
|
"end": 530, |
|
"text": "(Koehn, 2005)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 551, |
|
"end": 577, |
|
"text": "(Steinberger et al., 2006)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 600, |
|
"end": 634, |
|
"text": "(Tiedemann and Lars Nygaard, 2004)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 662, |
|
"end": 683, |
|
"text": "Sridhar et al., 2011)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 734, |
|
"end": 741, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this section, we describe the acoustic, language and translation models used in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speech Translation Models", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We use the AT&T WATSON SM speech recognizer (Goffin et al., 2004) . The speech recognition component consisted of a three-pass decoding approach utilizing two acoustic models. The models used three-state left-to-right HMMs representing just over 100 phonemes. The phonemes represented general English, spelled letters and head-body-tail representation for the eleven digits (with \"zero\" and \"oh\"). The pronunciation dictionary used the appropriate phoneme subset, depending on the type of the word. The models had 10.5k states and 27k HMMs, trained on just over 300k utterances, using both of the stereo channels. The baseline model training was initialized with several iterations of ML training, including two builds of context dependency trees, followed by three iterations of Minimum Phone Error (MPE) training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 65, |
|
"text": "(Goffin et al., 2004)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic and Language Model", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The Vocal Tract Length Normalization (VTLN) was applied in two different ways. One was estimated on an utterance level, and the other at the talk level. No speaker clustering was attempted in training. The performance at test time was comparable for both approaches on the development set. Once the warps were estimated, after five iterations, the ML trained model was updated using MPE training. Constrained model adaptation (CMA) was applied to the warped features and the adapted features were recognized in the final pass with the VTLN model. All the passes used the same LM. For offline recognition the warps, and the CMA adaptation, are performed at the talk level. For the real-time speech translation experiments, we used the VTLN model. The English language model was built using the permissible data in the IWSLT 2011 evaluation. The texts were normalized using a variety of cleanup, number and spelling normalization techniques and filtered by restricting the vocabulary to the top 375000 types; i.e., any sentence containing a token outside the vocabulary was discarded. First, we removed extraneous characters beyond the ASCII range followed by removal of punctuations. Subsequently, we normalized hyphenated words and removed words with more than 25 characters. The resultant text was normalized using a variety of number conversion routines and each corpus was filtered by restricting the vocabulary to the top 150000 types; i.e., any sentence containing a token outside the vocabulary was discarded. The vocabulary from all the corpora was then consolidated and another round of filtering to the top 375000 most frequent types was performed. The OOV rate on the TED dev2010 set is 1.1%. We used the AT&T FSM toolkit (Mohri et al., 1997) to train a trigram language model (LM) for each component (corpus). Finally, the component language models were interpolated by minimizing the perplexity on the dev2010 set. The results are shown in Table 2 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 1732, |
|
"end": 1752, |
|
"text": "(Mohri et al., 1997)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1952, |
|
"end": 1959, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acoustic and Language Model", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We used the Moses toolkit (Koehn et al., 2007) for performing statistical machine translation. Minimum error rate training (MERT) was performed on the development set (dev2010) to optimize the feature weights of the log-linear model used in translation. During decoding, the unknown words were preserved in the hypotheses. The data used to train the model is summarized in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 46, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 373, |
|
"end": 380, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Translation Model", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We also used a finite-state implementation of translation without reordering. Reordering can pose a challenge in real-time S2S translation as the textto-speech synthesis is monotonic and cannot retract already synthesized speech. While we do not address the text-to-speech synthesis of target text in this work, we perform this analysis as a precursor to future work. We represent the phrase translation table as a weighted finite state transducer (FST) and the language model as a finite state acceptor (FSA). The weight on the arcs of the FST is the dot product of the MERT weights with the translation scores. In addition, a word insertion penalty was also applied to each word to penalize short hypotheses. The decoding process consists of composing all possible segmentations of an input sentence with the phrase table FST and language model, followed by searching for the best path. Our FST-based translation is the equivalent of phrase-based translation in Moses without reordering. We present results using the independent chunk-wise strategy and chunk-wise translation conditioned on history in Table 3. The chunk-wise translation conditioned on history was performed using the continue-partialtranslation option in Moses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation Model", |
|
"sec_num": "5.2" |
|
}, |
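As a rough illustration of what this reordering-free composition computes, the sketch below enumerates all monotone phrase segmentations of an input and keeps the best-scoring path. It is not the authors' FST implementation: the phrase table, the stand-in LM, and the weights are hypothetical toys, and the real system applies the language model over the whole hypothesis via FST composition rather than per phrase as done here.

```python
import math
from functools import lru_cache

PHRASE_TABLE = {            # source phrase -> list of (target phrase, translation log prob)
    ("the",): [(("el",), -0.4), (("la",), -1.1)],
    ("sea",): [(("mar",), -0.3)],
    ("the", "sea"): [(("el", "mar"), -0.2)],
}

def lm_logprob(words):
    # stand-in for a real n-gram LM score
    return -0.5 * len(words)

def decode_monotone(source, lm_weight=1.0, tm_weight=1.0, word_penalty=-0.1):
    """Best (log score, translation) for a tuple of source words, without reordering."""
    @lru_cache(maxsize=None)
    def best_from(i):
        if i == len(source):
            return 0.0, ()
        best_score, best_hyp = -math.inf, ()
        for j in range(i + 1, len(source) + 1):
            src = tuple(source[i:j])
            # unknown phrases are passed through unchanged
            for tgt, tm_lp in PHRASE_TABLE.get(src, [(src, -10.0)]):
                tail_score, tail = best_from(j)
                score = (tm_weight * tm_lp + lm_weight * lm_logprob(tgt)
                         + word_penalty * len(tgt) + tail_score)
                if score > best_score:
                    best_score, best_hyp = score, tgt + tail
        return best_score, best_hyp
    return best_from(0)

print(decode_monotone(("the", "sea")))   # picks "el mar" via the two-word phrase
```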
|
{ |
|
"text": "The output of ASR for talks is a long string of words with no punctuation, capitalization or segmentation markers. In most offline ASR systems, the talk is first segmented into short utterance-like audio segments before passing them to the decoder. Prior work has shown that additional segmentation of ASR hypotheses of these segments may be necessary to improve translation quality (Rao et al., 2007; Matusov et al., 2007) . In a simultaneous speech translation system, one can neither find the optimal segmentation of the entire talk nor tolerate high latencies associated with long segments. Consequently, it is necessary to decode the incoming audio incrementally as well as segment the ASR hypotheses appropriately to maximize MT quality. We present a variety of linguistic and non-linguistic segmentation strategies for segmenting the source text input into MT. In our experiments, they are applied to different inputs including reference text, ASR 1best hypothesis for manually segmented audio and incremental ASR hypotheses from entire talks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 401, |
|
"text": "(Rao et al., 2007;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 423, |
|
"text": "Matusov et al., 2007)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Segmentation Strategies", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The simplest method is to segment the incoming text according to length in number of words. Such a procedure can destroy semantic context but has little to no overhead in additional processing. We experiment with segmenting the text according to word window sizes of length 4, 8, 11, and 15 (denoted as data sets win4, win8, win11, win15, respectively in Table 3 ). We also experiment with concatenating all of the text from one TED talk into a single chunk (complete talk).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 355, |
|
"end": 362, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Non-linguistic segmentation", |
|
"sec_num": "6.1" |
|
}, |
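A minimal sketch of this fixed-length window segmentation; the sentence used here is just an illustrative token stream.

```python
# Cut the token stream every `size` words, with no linguistic analysis (win4/win8/win11/win15).
def window_segments(tokens, size):
    return [tokens[i:i + size] for i in range(0, len(tokens), size)]

words = "so i want to talk today about energy and climate".split()
print(window_segments(words, 4))
# [['so', 'i', 'want', 'to'], ['talk', 'today', 'about', 'energy'], ['and', 'climate']]
```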
|
{ |
|
"text": "A novel hold-output model was also developed in order to segment the input text. Given a pair of parallel sentences, the model segments the source sentence into minimally sized chunks such that crossing links and links of one target word to many source words in an optimal GIZA++ alignment (Och and Ney, 2003) occur only within individual chunks. The motivation behind this model is that if a segment s 0 is input at time t 0 to an incremental MT system, it can be translated right away without waiting for a segment s i that is input at a later time t i , t i > 0. The hold-output model detects these kinds of segments given a sequence of English words that are input from left to right. A kernel-based SVM was used to develop this model. It tags a token t in the input with either the label HOLD, meaning to chunk it with the next token, or the label OUTPUT, meaning to output the chunk constructed from the maximal consecutive sequence of tokens preceding t that were all tagged as HOLD. The model considers a five word and POS window around the target token t. Unigram, bigram, and trigram word and POS features based upon this window are used for classification. Training and development data for the model was derived from the English-Spanish TED data (see Table 1 ) after running it through GIZA++. Accuracy of the model on the development set was 66.62% F-measure for the HOLD label and 82.75% for the OUTPUT label.", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 309, |
|
"text": "(Och and Ney, 2003)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1263, |
|
"end": 1270, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Non-linguistic segmentation", |
|
"sec_num": "6.1" |
|
}, |
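One reasonable reading of how the HOLD/OUTPUT training labels can be derived from a word alignment is sketched below. This is our illustration, not the authors' code: it simply closes a chunk wherever no alignment link crosses the boundary, which yields the minimal monotone chunks described above.

```python
# Derive HOLD/OUTPUT tags for a source sentence from alignment pairs
# (source_index, target_index), as produced by GIZA++-style aligners.

def hold_output_tags(num_src_tokens, alignment):
    links_by_src = {i: [] for i in range(num_src_tokens)}
    for s, t in alignment:
        links_by_src[s].append(t)

    tags = []
    for i in range(num_src_tokens):
        left = [t for s in range(i + 1) for t in links_by_src[s]]
        right = [t for s in range(i + 1, num_src_tokens) for t in links_by_src[s]]
        # OUTPUT: every link seen so far ends before every later link (no crossing).
        if not right or (left and max(left) < min(right)):
            tags.append("OUTPUT")
        else:
            tags.append("HOLD")
    return tags

# "the sea is blue" -> "el mar es azul": monotone 1-to-1 alignment,
# so every token can close a chunk on its own.
print(hold_output_tags(4, [(0, 0), (1, 1), (2, 2), (3, 3)]))
# "we use" -> "usamos": both source words align to one target word, so the
# first token is tagged HOLD and the chunk is emitted only at the second.
print(hold_output_tags(2, [(0, 0), (1, 0)]))
```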
|
{ |
|
"text": "Since MT models are trained on parallel text sentences, we investigate segmenting the source text into sentences. We also investigate segmenting the text further by predicting comma separated chunks within sentences. These tasks are performed by training a kernel-based SVM (Haffner et al., 2003) on a subset of English TED data. This dataset contained 1029 human-transcribed talks consisting of about 103,000 sentences containing about 1.6 million words. Punctuation in this dataset was normalized as follows. Different kinds of sentence ending punctuations were transformed into a uniform end of sentence marker. Double-hyphens were transformed into commas. Commas already existing in the input were kept while all other kinds of punctuation symbols were deleted. A part of speech (POS) tagger was applied to this input. For speed, a unigram POS tagger was implemented which was trained on the Penn Treebank (Marcus et al., 1993) and used orthographic features to predict the POS of unknown words. The SVM-based punctuation classifier relies on a five word and POS window in order to classify the target word. Specifically, token t 0 is classified given as input the window t \u22122 t \u22121 t o t 1 t 2 . Unigram, bigram, and trigram word and POS features based on this window were used for classification. Accuracy of the classifier on the development set was 60.51% F-measure for sentence end detection and 43.43% F-measure for comma detection. Subsequently, data sets pred-sent (sentences) and pred-punct (commaseparated chunks) were obtained. Corresponding to these, two other data sets ref-sent and ref-punct were obtained based upon gold-standard punctuations in the reference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 296, |
|
"text": "(Haffner et al., 2003)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 910, |
|
"end": 931, |
|
"text": "(Marcus et al., 1993)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Linguistic segmentation", |
|
"sec_num": "6.2" |
|
}, |
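The five-token window classifier described above can be sketched as follows. This example uses scikit-learn's LinearSVC as a generic stand-in for the kernel-based SVM toolkit used in the paper, and the tokens, POS tags, and labels are purely illustrative.

```python
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import LinearSVC

PAD = ("<pad>", "<pad>")

def window_features(tokens_pos, i):
    """Unigram/bigram/trigram word and POS features for the window t_-2 .. t_+2."""
    padded = [PAD, PAD] + tokens_pos + [PAD, PAD]
    win = padded[i:i + 5]          # after padding, this 5-gram is centered on token i
    feats = {}
    for n in (1, 2, 3):
        for j in range(len(win) - n + 1):
            words = "_".join(w for w, _ in win[j:j + n])
            tags = "_".join(p for _, p in win[j:j + n])
            feats[f"w{n}_{j}={words}"] = 1.0
            feats[f"p{n}_{j}={tags}"] = 1.0
    return feats

# Toy training data: the label is 1 if a sentence boundary follows the token.
sent = [("this", "DT"), ("is", "VBZ"), ("a", "DT"), ("talk", "NN")]
X = [window_features(sent, i) for i in range(len(sent))]
y = [0, 0, 0, 1]

vec = DictVectorizer()
clf = LinearSVC().fit(vec.fit_transform(X), y)
print(clf.predict(vec.transform([window_features(sent, 3)])))
```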
|
{ |
|
"text": "Besides investigating the use of comma-separated segments, we investigated other linguistically motivated segments. These included conjunction-word based segments. These segments are separated at either conjunction (e.g. \"and,\" \"or\") or sentenceending word boundaries. Conjunctions were identified using the unigram POS tagger. F-measure performance for detecting conjunctions by the tagger on the development set was quite high, 99.35%. As an alternative, text chunking was performed within each sentence, with each chunk corresponding to one segment. Text chunks are non-recursive syntactic phrases in the input text. We investigated segmenting the source into text chunks using TreeTagger, a decision-tree based text chunker (Schmid, 1994) . Initial sets of text chunks were created by using either gold-standard sentence boundaries or boundaries detected using the punctuation classifier, yielding the data sets chunk-ref- Table 3 : BLEU scores at the talk level for reference text and ASR 1-best for various segmentation strategies. The ASR 1-best was performed on manually segmented audio chunks provided in tst2010 set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 728, |
|
"end": 742, |
|
"text": "(Schmid, 1994)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 927, |
|
"end": 934, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Linguistic segmentation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "punct and chunk-pred-punct. Chunk types included NC (noun chunk), VC (verb chunk), PRT (particle), and ADVC (adverbial chunk). Because these chunks may not provide sufficient context for translation, we also experimented with concatenating neighboring chunks of certain types to form larger chunks. Data sets lgchunk1 concatenate together neighboring chunk sequences of the form NC, VC or NC, ADVC, VC, intended to capture as single chunks instances of subject and verb. In addition to this, data sets lgchunk2 capture chunks such as PC (prepositional phrase) and VC followed by VC (control and raising verbs). Finally, data sets lgchunk3 capture as single chunks VC followed by NC and optionally followed by PRT (verb and its direct object).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Linguistic segmentation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Applying the conjunction segmenter after the aforementioned punctuation classifier in order to detect the ends of sentences yields the data set conjpred-eos. Applying it on sentences derived from the gold-standard punctuations yields the data set conjref-eos. Finally, applying the hold-output model to sentences derived using the punctuation classifier produces the data set pred-hold. Obtaining English sentences tagged with HOLD and OUTPUT directly from the output of GIZA++ on English-Spanish sentences in the reference produces the data set ref-hold. The strategies containing the keyword ref for ASR simply means that the ASR hypotheses are used in place of the gold reference text. Figure 1 : Latencies and BLEU scores for tst2010 set using incremental ASR decoding and translation", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 689, |
|
"end": 697, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Linguistic segmentation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We also performed real-time speech translation by using incremental speech recognition, i.e., the decoder returns partial hypotheses that, independent of the pruning during search, will not change in the future. Figure 1 shows the plot for two scenarios: one in which the partial hypotheses are sent directly to machine translation and another where the best segmentation strategy pred-punct is used to segment the partial output before sending it to MT. The plot shows the BLEU scores as a function of ASR timeouts used to generate the partial hypotheses. Figure 1 also shows the average latency involved in incremental speech translation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 220, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 563, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Linguistic segmentation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "The BLEU scores for the segmentation strategies over ASR hypotheses was computed at the talk level. Since the ASR hypotheses do not align with the reference source text, it is not feasible to evaluate the translation performance using the gold reference. While other studies have used an approximate edit distance algorithm for resegmentation of the hypotheses , we simply concatenate all the segments and perform the evaluation at the talk level.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The hold segmentation strategy yields the poorest translation performance. The significant drop in BLEU score can be attributed to relatively short segments (2-4 words) that was generated by the model. The scheme oversegments the text and since the translation and language models are trained on sentence like chunks, the performance is poor. For example, the input text the sea should be translated as el mar, but instead the hold segmenter chunks it as the\u2022sea which MT's chunk translation renders as el\u2022el mar. It will be interesting to increase the span of the hold strategy to subsume more contiguous sequences and we plan to investigate this as part of future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The chunk segmentation strategy yields quite poor translation performance. In general, it does not make the same kinds of errors that the hold strategy makes; for example, the input text the sea will be treated as one NC chunk by the chunk segmentation strategy, leading MT to translate it correctly as el mar. The short chunk sizes of chunk lead to other kinds of errors. For example, the input text we use will be chunked into the NC we and the VC use, which will be translated incorrectly as nosotros\u2022usar; the infinitive usar is se-lected rather than the properly conjugated form usamos. However, there is a marked improvement in translation accuracy with increasingly larger chunk sizes (lgchunk1, lgchunk2, and lgchunk3). Notably, lgchunk3 yields performance that approaches that of win8 with a chunk size that is one third of win8's.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The conj-pred-eos and pred-punct strategies work the best, and it can be seen that the average segment length (8-12 words) generated in both these schemes is very similar to that used for training the models. It is also about the average latency (4-5 seconds) that can be tolerated in cross-lingual communication, also known as ear-voice span (Lederer, 1978) . The non-linguistic segmentation using fixed word length windows also performs well, especially for the longer length windows. However, longer windows (win15) increase the latency and any fixed length window typically destroys the semantic context. It can also be seen from Table 3 that translating the complete talk is suboptimal in comparison with segmenting the text. This is primarily due to bias on sentence length distributions in the training data. Training models on complete talks is likely to resolve this issue. Contrasting the use of reference segments as input to MT (ref-sent, ref-punct, conjref-eos) versus the use of predicted segments (predsent, pred-punct, conj-pred-eos, respectively), it is interesting to note that the MT accuracies never differed greatly between the two, despite the noise in the set of predicted segments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 358, |
|
"text": "(Lederer, 1978)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 937, |
|
"end": 974, |
|
"text": "MT (ref-sent, ref-punct, conjref-eos)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 634, |
|
"end": 641, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The performance of the real-time speech translation of TED talks is much lower than the offline scenario. First, we use only a VTLN model as performing CMA adaptation in a real-time scenario typically increases latency. Second, the ASR language model is trained on sentence-like units and decoding the entire talk with this LM is not optimal. A language model trained on complete talks will be more appropriate for such a framework and we are investigating this as part of current work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Comparing the accuracies of different speech translation strategies, Table 3 shows that pred-punct performs the best. When embedded in an incremental MT speech recognition system, Figure 1 shows that it is more accurate than the system that sends partial ASR hypotheses directly to MT. This advantage decreases, however, when the ASR timeout parameter is increased to more than five or six sec-onds. In terms of latency, Figure 1 shows that the addition of the pred-punct segmenter into the incremental system introduces a significant delay. About one third of the increase in delay can be attributed to merely maintaining the two word lookahead window that the segmenter's classifier needs to make decisions. This is significant because this kind of window has been used quite frequently in previous work on simultaneous translation (cf. , and yet to our knowledge this penalty associated with this configuration was never mentioned. The remaining delay can be attributed to the long chunk sizes that the segmenter produces. An interesting aspect of the latency curve associated with the segmenter in Figure 1 is that there are two peaks at ASR timeouts of 2,500 and 4,500 ms, and that the lowest latency is achieved at 3,000 ms rather than at a smaller value. This may be attributed to the fact that the system is a pipeline consisting of ASR, segmenter, and MT, and that 3,000 ms is roughly the length of time to recite comma-separated chunks. Consequently, the two latency peaks appear to correspond with ASR producing segments that are most divergent with segments that the segmenter produces, leading to the most pipeline \"stalls.\" Conversely, the lowest latency occurs when the timeout is set so that ASR's segments most resemble the segmenter's output to MT.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 76, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 188, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 429, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1102, |
|
"end": 1110, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We investigated various approaches for incremental speech translation of TED talks, with the aim of producing a system with high MT accuracy and low latency. For acoustic modeling, we found that VTLN and CMA adaptation were useful for increasing the accuracy of ASR, leading to a word accuracy of 80% on TED talks used in the IWSLT evaluation track. In our offline MT experiments retention of partial translations was found useful for increasing MT accuracy, with the latter being slightly more helpful. We experimented with several linguistic and non-linguistic strategies for text segmentation before translation. Our experiments indicate that a novel segmentation into conjunction-separated sentence chunks resulted in accuracies almost as high and latencies almost as short as comma-separated sentence chunks. They also indicated that signifi-cant noise in the detection of sentences and punctuation did not seriously impact the resulting MT accuracy. Experiments on real-time simultaneous speech translation using partial recognition hypotheses demonstrate that introduction of a segmenter increases MT accuracy. They also showed that in order to reduce latency it is important for buffers in different pipeline components to be synchronized so as to minimize pipeline stalls. As part of future work, we plan to extend the framework presented in this work for performing speech-to-speech translation. We also plan to address the challenges involved in S2S translation across languages with very different word order.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "http://www.ted.com partial speech recognition hypotheses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The segmented and unsegmented talk may not be equal in length, i.e., I = I", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used the standard NIST scoring package as we did not have access to the IWSLT evaluation server that may normalize and score differently", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Simon Byers for his help with organizing the TED talks data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Real-time incremental speech-to-speech translation of dialogs", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Bangalore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Sridhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Kolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Golipour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Jimenez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of NAACL:HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Bangalore, V. K. Rangarajan Sridhar, P. Kolan, L. Golipour, and A. Jimenez. 2012. Real-time in- cremental speech-to-speech translation of dialogs. In Proceedings of NAACL:HLT, June.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Text segmentation criteria for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 5th international conference on Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Cettolo and M. Federico. 2006. Text segmentation criteria for statistical machine translation. In Proceed- ings of the 5th international conference on Advances in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "WIT3: Web Inventory of Transcribed and Translated Talks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Girardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of EAMT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Cettolo, C. Girardi, and M. Federico. 2012. WIT3: Web Inventory of Transcribed and Translated Talks. In Proceedings of EAMT.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The GALE project: A description and an update", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ASRU Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Cohen. 2007. The GALE project: A description and an update. In Proceedings of ASRU Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Overview of the IWSLT 2011 evaluation campaign", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "St\u00fcker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of IWSLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Federico, L. Bentivogli, M. Paul, and S. St\u00fcker. 2011. Overview of the IWSLT 2011 evaluation campaign. In Proceedings of IWSLT.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The influence of utterance chunking on machine translation performance", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "F\u00fcgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kolss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. F\u00fcgen and M. Kolss. 2007. The influence of utterance chunking on machine translation performance. In Pro- ceedings of Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Open domain speech recognition & translation: Lectures and speeches", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "F\u00fcgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kolss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Bernreuther", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Paulik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Stuker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. F\u00fcgen, M. Kolss, D. Bernreuther, M. Paulik, S. Stuker, S. Vogel, and A. Waibel. 2006. Open domain speech recognition & translation: Lectures and speeches. In Proceedings of ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Simultaneous translation of lectures and speeches", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "F\u00fcgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kolss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Machine Translation", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "209--252", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. F\u00fcgen, A. Waibel, and M. Kolss. 2007. Simultaneous translation of lectures and speeches. Machine Trans- lation, 21:209-252.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The AT&T Watson Speech Recognizer", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Goffin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Allauzen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Bocchieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hakkani-T\u00fcr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ljolje", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Parthasarathy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "Septem-- ber", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Goffin, C. Allauzen, E. Bocchieri, D. Hakkani-T\u00fcr, A. Ljolje, and S. Parthasarathy. 2004. The AT&T Watson Speech Recognizer. Technical report, Septem- ber.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Optimizing svms for complex call classification", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Haffner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "T\u00fcr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wright", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of ICASSP'03", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Haffner, G. T\u00fcr, and J. Wright. 2003. Optimizing svms for complex call classification. In Proceedings of ICASSP'03.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "End-to-end evaluation in simultaneous translation", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Hamon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "F\u00fcgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Mostefa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Arranz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kolss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Choukri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 12th Conference of the European Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. Hamon, C. F\u00fcgen, D. Mostefa, V. Arranz, M. Kolss, A. Waibel, and K. Choukri. 2009. End-to-end evalua- tion in simultaneous translation. In Proceedings of the 12th Conference of the European Chapter of the ACL (EACL 2009), March.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The IBM 2009 GALE Arabic speech translation system", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Kingsbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Soltau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Saon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong-Kwang", |
|
"middle": [], |
|
"last": "Kuo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Mangu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ravuri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Morgan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Janin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Kingsbury, H. Soltau, G. Saon, S. Chu, Hong-Kwang Kuo, L. Mangu, S. Ravuri, N. Morgan, and A. Janin. 2011. The IBM 2009 GALE Arabic speech translation system. In Proceedings of ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Constantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Herbst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Koehn, H. Hoang, A. Birch, C. Callison-Burch, M. Federico, N. Bertoldi, B. Cowan, Shen W., C. Moran, R. Zens, C. J. Dyer, O. Bojar, A. Constantin, and E. Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Europarl: A parallel corpus for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Koehn. 2005. Europarl: A parallel corpus for statisti- cal machine translation. In MT Summit.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Simultaneous interpretation: units of meaning and other features", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Lederer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1978, |
|
"venue": "Language interpretation and communication", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "323--332", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Lederer. 1978. Simultaneous interpretation: units of meaning and other features. In D. Gerver and H. W. Sinaiko, editors, Language interpretation and commu- nication, pages 323-332. Plenum Press, New York.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Building a large annotated corpus of English: the Penn treebank", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Santorini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Marcinkiewicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational Linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "313--330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Marcus, B. Santorini, and M. A. Marcinkiewicz. 1993. Building a large annotated corpus of En- glish: the Penn treebank. Computational Linguistics, 19(2):313-330.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Evaluating machine translation output with automatic sentence segmentation", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Matusov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Leusch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Bender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of IWSLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Matusov, G. Leusch, O. Bender, and H. Ney. 2005. Evaluating machine translation output with automatic sentence segmentation. In Proceedings of IWSLT.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Improving speech translation with automatic boundary prediction", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Matusov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hillard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Magimai-Doss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hakkani-T\u00fcr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Matusov, D. Hillard, M. Magimai-Doss, D. Hakkani- T\u00fcr, M. Ostendorf, and H. Ney. 2007. Improving speech translation with automatic boundary predic- tion. In Proceedings of Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "At&t general-purpose finite-state machine software tools", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mohri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Riley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Mohri, F. Pereira, and M. Riley. 1997. At&t general-purpose finite-state machine software tools, http://www.research.att.com/sw/tools/fsm/.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A systematic comparison of various statistical alignment models", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "19--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. J. Och and H. Ney. 2003. A systematic comparison of various statistical alignment models. Computational Linguistics, 29(1):19-51.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A scalable approach to building a parallel corpus from the Web", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Sridhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Barbosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Bangalore", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. K. Rangarajan Sridhar, L. Barbosa, and S. Bangalore. 2011. A scalable approach to building a parallel cor- pus from the Web. In Proceedings of Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Optimizing sentence segmentation for spoken language translation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Lane", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Schultz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Rao, I. Lane, and T. Schultz. 2007. Optimizing sen- tence segmentation for spoken language translation. In Proceedings of Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Probabilistic part-of-speech tagging using decision trees", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proceedings of the International Conference on New Methods in Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Schmid. 1994. Probabilistic part-of-speech tagging using decision trees. In Proceedings of the Interna- tional Conference on New Methods in Language Pro- cessing.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "The JRC-Acquis: A multilingual aligned parallel corpus with 20+ languages", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Pouliquen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Widiger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Ignat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Erjavec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Tufis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Steinberger, B. Pouliquen, A. Widiger, C. Ignat, T. Er- javec, and D. Tufis. 2006. The JRC-Acquis: A multi- lingual aligned parallel corpus with 20+ languages. In Proceedings of LREC.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "The OPUS corpus -parallel & free", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"Lars" |
|
], |
|
"last": "Nygaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Tiedemann and L. Lars Nygaard. 2004. The OPUS corpus -parallel & free. In Proceedings of LREC.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Statistical machine translation of European parliamentary speeches", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Vilar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Matusov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of MT Summit", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Vilar, E. Matusov, S. Hasan, R. Zens, and H. Ney. 2005. Statistical machine translation of European par- liamentary speeches. In Proceedings of MT Summit.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Verbmobil: Foundations of Speech-to-Speech Translation", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Wahlster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "W. Wahlster, editor. 2000. Verbmobil: Foundations of Speech-to-Speech Translation. Springer.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"text": "Statistics of the data used for training the speech translation models.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "ASR word accuracies on the IWSLT data sets.3", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |