|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:55:11.458309Z" |
|
}, |
|
"title": "MTL-SLT: Multi-Task Learning for Spoken Language Tasks", |
|
"authors": [ |
|
{ |
|
"first": "Zhiqi", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Milind", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Amazon Alexa", |
|
"institution": "", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Raju", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Amazon Alexa", |
|
"institution": "", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Amazon Alexa", |
|
"institution": "", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Bach", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Amazon Alexa", |
|
"institution": "", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chul", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Amazon Alexa", |
|
"institution": "", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Language understanding in speech-based systems has attracted extensive interest from both academic and industrial communities in recent years with the growing demand for voice-based applications. Prior works focus on independent research by the automatic speech recognition (ASR) and natural language processing (NLP) communities, or on jointly modeling the speech and NLP problems focusing on a single dataset or single NLP task. To facilitate the development of spoken language research, we introduce MTL-SLT, a multi-task learning framework for spoken language tasks. MTL-SLT takes speech as input, and outputs transcription, intent, named entities, summaries, and answers to text queries, supporting the tasks of spoken language understanding, spoken summarization and spoken question answering respectively. The proposed framework benefits from three key aspects: 1) pre-trained sub-networks of ASR model and language model; 2) multitask learning objective to exploit shared knowledge from different tasks; 3) end-to-end training of ASR and downstream NLP task based on sequence loss. We obtain state-of-the-art results on spoken language understanding tasks such as SLURP and ATIS. Spoken summarization results are reported on a new dataset: Spoken-Gigaword.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Language understanding in speech-based systems has attracted extensive interest from both academic and industrial communities in recent years with the growing demand for voice-based applications. Prior works focus on independent research by the automatic speech recognition (ASR) and natural language processing (NLP) communities, or on jointly modeling the speech and NLP problems focusing on a single dataset or single NLP task. To facilitate the development of spoken language research, we introduce MTL-SLT, a multi-task learning framework for spoken language tasks. MTL-SLT takes speech as input, and outputs transcription, intent, named entities, summaries, and answers to text queries, supporting the tasks of spoken language understanding, spoken summarization and spoken question answering respectively. The proposed framework benefits from three key aspects: 1) pre-trained sub-networks of ASR model and language model; 2) multitask learning objective to exploit shared knowledge from different tasks; 3) end-to-end training of ASR and downstream NLP task based on sequence loss. We obtain state-of-the-art results on spoken language understanding tasks such as SLURP and ATIS. Spoken summarization results are reported on a new dataset: Spoken-Gigaword.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The wide deployment of voice controlled computing has led to extensive interest in spoken language tasks in recent years (Saade et al., 2019; Bastianelli et al., 2020; Li et al., 2018) . For instance, spoken language understanding aims to extract the semantics from user queries Kim et al., 2021a; Lai et al., 2021) , spoken question answering aims to predict the answer given the spoken context (You et al., 2021; Kuo et al., 2020) . The rapid development of spoken language tasks have followed dataset releases (Zhang et al., 2020; Table 1 : An example of multiple spoken language tasks. Given input utterances in the form of speech, the ASR-NLP system can provide a summary of the speech (summarization), intent detection and named entity recognition (language understanding) and answer textual queries. The spoken question answering task requires additional questions as input.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 141, |
|
"text": "(Saade et al., 2019;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 167, |
|
"text": "Bastianelli et al., 2020;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 184, |
|
"text": "Li et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 297, |
|
"text": "Kim et al., 2021a;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 315, |
|
"text": "Lai et al., 2021)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 414, |
|
"text": "(You et al., 2021;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 432, |
|
"text": "Kuo et al., 2020)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 533, |
|
"text": "(Zhang et al., 2020;", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 534, |
|
"end": 541, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "models (Devlin et al., 2019; Lewis et al., 2020; Chuang et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 28, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 29, |
|
"end": 48, |
|
"text": "Lewis et al., 2020;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 49, |
|
"end": 69, |
|
"text": "Chuang et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Multi-task learning (MTL) (Caruana, 1997) focuses on simultaneously solving multiple related tasks and has attracted much attention in recent years. Compared with single-task learning, it can reduce the training and inference time while improving generalization performance and prediction accuracy by learning a shared representation across related tasks. Prior works show the effectiveness of MTL while they only focus on multiple text-based tasks/datasets (e.g., MT-DNN ) or multiple speechbased tasks/datasets (e.g., SpeechStew (Chan et al., 2021) ). Also, some works (Raju et al., 2021; Rao et al., 2021) prove the effectiveness of considering speech information when performing NLP tasks. Thus, as can be seen in Figure 1 , we argue that it is helpful when extend these MTL approaches to spoken language tasks (i.e., ASR-NLP-shared). Audio Frames In this paper, we develop multi-task learning methods to optimize spoken summarization, spoken question answering, spoken language understanding (intent classification and slot filling), as well as speech recognition on multiple spoken language datasets. An example of an application with these four tasks can be seen in Table 1 . Note that instead of performing experiments only on understanding task (e.g., Feng et al. (2021) ), we also consider harder generation task into our framework, whose data distribution has significant difference to classification task (Observation can be witnessed from Figure 2 , the purple points are far away from the other data).", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 41, |
|
"text": "(Caruana, 1997)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 550, |
|
"text": "(Chan et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 590, |
|
"text": "(Raju et al., 2021;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 608, |
|
"text": "Rao et al., 2021)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1261, |
|
"end": 1279, |
|
"text": "Feng et al. (2021)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 718, |
|
"end": 726, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1173, |
|
"end": 1180, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1452, |
|
"end": 1460, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A primary challenge with audio as an input modality is the impact of speech recognition errors and acoustic noise on spoken language tasks. To mitigate this, our approach jointly optimizes pretrained speech recognition and language models for semantic metrics of interest and we train across multiple language tasks. The various language tasks and the impact of multi-task training can be visualized in the clustering plot of the hidden state of a pretrained language model in Figure 2 . We demonstrate our results using listen-attend-spell (LAS) (Chan et al., 2016) speech recognition model and a BART (Lewis et al., 2020) based NLP model. Overall, the main contributions are as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 547, |
|
"end": 566, |
|
"text": "(Chan et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 623, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 477, |
|
"end": 485, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a MTL-SLT framework to effectively joint train an ASR model and an NLP model on multiple spoken language tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Experimental results show that our proposed multi-task learning framework is state-of-the-art on spoken language understanding tasks. Training multiple language tasks followed by task-specific finetuning yields optimal models. Jointly training ASR and NLP with policy gradient methods improves metrics on all spoken language tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We prepare a spoken summarization dataset based on the Gigaword dataset (Rush et al., 2015 ) using a multi-speaker text-to-speech (TTS) model. The performance of the introduced spoken-summarization task with the MTL framework is studied.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 92, |
|
"text": "(Rush et al., 2015", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our approach extends to multiple NLP tasks, providing improvements in an end-to-end spoken language learning setting. We make our code and data publicly available for researchers to accelerate the development of related spoken language tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "MTL MTL aims to improve the performance on a set of primary tasks through an inductive bias (Caruana, 1997) introduced by additional training objectives on auxilliary tasks. MTL has also been used to train several tasks jointly, without the notions of primary and auxilliary tasks (McCann et al., 2018) . MTL approaches for deep learning include hard parameter sharing where the entire layers and parameters are shared between tasks; and soft parameter sharing, where each task has it's own model parameters but the distance between the model parameters is regularized to help the taskspecific parameters to be similar (Ruder, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 107, |
|
"text": "(Caruana, 1997)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 302, |
|
"text": "(McCann et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 632, |
|
"text": "(Ruder, 2017)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
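
A minimal PyTorch sketch (not from the paper; module sizes and the penalty weight are illustrative assumptions) contrasting the two MTL flavours described above: hard sharing keeps one trunk for all tasks with task-specific heads, while soft sharing keeps per-task parameters and regularizes their distance.

```python
import torch.nn as nn

class HardSharedModel(nn.Module):
    """Hard parameter sharing: one trunk shared by every task, plus task-specific heads."""
    def __init__(self, dim=32, n_classes=(3, 5)):
        super().__init__()
        self.trunk = nn.Sequential(nn.Linear(dim, dim), nn.ReLU())   # shared layers
        self.heads = nn.ModuleList([nn.Linear(dim, c) for c in n_classes])

    def forward(self, x, task_id):
        return self.heads[task_id](self.trunk(x))

def soft_sharing_penalty(model_a, model_b, weight=1e-3):
    """Soft parameter sharing: each task keeps its own model, but the distance
    between corresponding parameters is penalized to keep them similar."""
    penalty = sum((pa - pb).pow(2).sum()
                  for pa, pb in zip(model_a.parameters(), model_b.parameters()))
    return weight * penalty
```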
|
{ |
|
"text": "Pre-trained Models The paradigm of pre-training a language model (LM) followed by task-specific fine-tuning has been shown to obtain remarkable performance on many NLP tasks. BERT (Devlin et al., 2019) pre-trains deep bidirectional representations from unlabeled text and showed competitive performance on the GLUE benchmark. This provided a base for researchers to build upon, leading to several extensions and rapid progress in the space of pre-trained LMs. The MultiTask Deep Neural Network is one such extension with multi-task learning across all GLUE tasks. The paper argues for improved domain transfer by performing standard BERT pretraining, followed by multi-task learning and task-specific fine-tuning. BERT has been leveraged for various NLP tasks, for e.g. the effectiveness of BERT for the summarization task was explored by Liu and Lapata (2019) . The performance of text generation tasks have been approaching a near-human level by virtue of pre-trained encoderdecoder models, such as BART (Lewis et al., 2020) and T5 (Raffel et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 201, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 839, |
|
"end": 860, |
|
"text": "Liu and Lapata (2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1026, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1034, |
|
"end": 1055, |
|
"text": "(Raffel et al., 2020)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Spoken Language Tasks Spoken language tasks include standard NLP tasks with speech-input instead of text-input. Speech recognition errors can impact the performance of downstream NLP systems. Recently, Feng et al. (2021) proposed the ASR-GLUE benchmark, augmented 6 NLP tasks from GLUE with speech generated from Google TTS, and analyzed the robustness of NLP to ASR errors. However, all 6 tasks are sentence-level classification problems, and the models did not utilize MTL framework. introduced a speech-language joint pre-training framework for SLU tasks. The paper showed the effectiveness of the joint pre-training method with experiments on four classification tasks, i.e., intent detection, dialog act classification, spoken sentiment analysis and spoken question answering. Prior works for SLU show the impact of speech recognition errors on downstream Natural Language Understanding (NLU) performance and propose joint training of ASR and NLU to improve overall performance (Rao et al., 2021) . Kim et al. (2021b) introduced a speech-based benchmark for task-oriented dialogue systems, specifically targeting the problems of multi-domain dialogue state tracking and knowledge grounded dialogue modeling, and showed that well-behaved models trained on written conversations do not perform well on spoken data. 3 Approach 3.1 Architecture of MTL-SLT Figure 3 shows the proposed MTL framework which consists of three different modules, i.e., the ASR model, the NLP model and the interface between them. In this work, the MTL-SLT uses the LAS architecture for ASR and BART for NLP.", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 220, |
|
"text": "Feng et al. (2021)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 983, |
|
"end": 1001, |
|
"text": "(Rao et al., 2021)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1004, |
|
"end": 1022, |
|
"text": "Kim et al. (2021b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1357, |
|
"end": 1365, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "ASR Model Unlike previous works on spoken language tasks (SLT) that obtain transcriptions using existing ASR systems/tools (Feng et al., 2021; Li et al., 2018) , in our approach, the ASR model is updated with the training of end-to-end spoken language tasks. To address this, we generate the ASR transcriptions from a LAS model explained in (Rao et al., 2021; Chan et al., 2016) , and pre-trained it on the LibriSpeech dataset (Panayotov et al., 2015) following previous works (Lugosch et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 142, |
|
"text": "(Feng et al., 2021;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 159, |
|
"text": "Li et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 359, |
|
"text": "(Rao et al., 2021;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 378, |
|
"text": "Chan et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 427, |
|
"end": 451, |
|
"text": "(Panayotov et al., 2015)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 499, |
|
"text": "(Lugosch et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Enc-Decoder NLP Model Bidirectional and Auto-Regressive Transformers (BART) (Lewis et al., 2020) uses a separate bidirectional encoder and autoregressive decoder similar to BERT (Devlin et al., 2019) except that (1) BART's decoder incorporates cross attention over the final encoder layer and (2) BART's encoder does not use a feedforward dense layer for word prediction. The BART model can be used to perform both language understanding (i.e., intent classification) and language generation (i.e., summarization) problems at the same time, we refer to it as an NLP model in this work. We use the same pre-trained BARTbase model as the original paper, which includes 6 transformer layers in the encoder and decoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Spoken Language Interface The interface exposes relevant outputs from the ASR model to the downstream NLP model. Prior works have proposed rich interfaces that expose neural embeddings from ASR in addition to the text recognition (Rao et al., 2020) . In this work, we use a simple text interface i.e. the best text recognition hypothesis from the output of ASR as the input to the NLP models. We leverage pre-trained models for both ASR and NLP. Inspired by (Rao et al., 2021; Raju et al., 2021) , we introduce sequence loss training for the joint ASR-NLP system that allows direct optimization of non-differentiable SLT metrics. Specifically, we consider the error rate of ASR, summarization, QA, intent classification and slot filling as the SLT metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 248, |
|
"text": "(Rao et al., 2020)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 476, |
|
"text": "(Rao et al., 2021;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 495, |
|
"text": "Raju et al., 2021)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "1 / 29 Zhiqi Huang ASR Audio Frames ASR Shared LAS Enc/Decoder Shared BART Task #1: Summarization Task #2: Intent Classification Task #3: Slot Filling Task #4: Question Answer w 1 1 , w 1 2 , \u2026 <eos> w 2 1 , w 2 2 , \u2026 <eos> \u2026 w n 1 , w n 2 , \u2026", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The MTL Training Strategy can be divided into three steps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint MTL Training Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Backbone Pre-training The ASR model is first pre-trained for the speech recognition task using the LibriSpeech dataset. The NLP model uses the pre-trained BART (Lewis et al., 2020) model which is trained to reconstruct corrupted text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 180, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint MTL Training Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "MTL Pre-training Our joint pre-training on multiple tasks falls into the paradigm of multi-task learning (MTL). Training details of the MTL-SLT can be seen in Algorithm 1, in the training stage, we take turns to load the training data of these pre-training tasks. For example, we update model parameters on a batch of training instances from the first task, and then update parameters on a batch of training instances of the second task, and the process repeats. Note that, according to our preliminary experimentation, the effect of different orders of carrying out these pre-training tasks is negligible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint MTL Training Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Post Fine-tuning After pre-trained with MTL objective, the MTL model is further fine-tuned on each dataset with few training steps to improve the performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint MTL Training Strategy", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "There are three types of losses to be optimized in our framework, i.e., ASR loss, language taskspecific losses and sequence losses. Our model is first trained by updating \u03b8 ASR based on the ASR loss, then trained by updating \u03b8 N LP for each downstream task. Finally, sequence loss training is employed to update both \u03b8 ASR and \u03b8 N LP .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Losses", |
|
"sec_num": "3.3" |
|
}, |
|
{

"text": "Algorithm 1: Training an MTL-SLT model. Parameters: pre-trained LAS and BART model \u03b8, randomly initialized task-specific heads, epoch number M, task number T. // Prepare the data for the T tasks. for t in 1, 2, ..., T do: pack dataset t into mini-batches D_t; end. // Multi-task learning. for epoch in 1, 2, ..., M do: 1. merge all the datasets: D = D_1 \u222a D_2 \u222a ... \u222a D_T; 2. shuffle D; for b_t in D do: // b_t is a mini-batch of task t. 3. compute the loss L(\u03b8): // train the ASR and NLP tasks. L(\u03b8) += L_asr for ASR; L(\u03b8) += L_gen for summarization; L(\u03b8) += L_tagging for slot filling; L(\u03b8) += L_intent for intent detection; L(\u03b8) += L_qa for question answering; if performing joint training then L(\u03b8) += L_seq.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Losses",

"sec_num": "3.3"

},

{

"text": "ASR Loss Given an input audio sequence x, the ASR system is trained by teacher-forcing the encoder-decoder network with the tokens of the ground-truth transcript w, with the loss function $L_{asr} = -\\sum_{j=1}^{N} \\log p(w_j \\mid x, w_{:j-1}; \\theta)$. Intent Detection For the sentence-level classification problem, denote the pooled sentence representation obtained from the input ASR token sequence w as e and the correct intent label as c; the model infers c from e. The negative log-likelihood loss is used as the classification loss, $L_{intent} = -\\log p(c \\mid e; \\theta)$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Losses",

"sec_num": "3.3"

},
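
A minimal Python sketch of the training loop in Algorithm 1, under the assumption that the caller supplies per-task mini-batch iterables and the task-specific loss functions of Sec. 3.3; this is illustrative, not the authors' implementation.

```python
import random

def train_mtl(model, task_loaders, loss_fns, optimizer, epochs=1, seq_loss_fn=None):
    """task_loaders: dict task name -> iterable of mini-batches for that task;
    loss_fns: dict task name -> callable(model, batch) returning that task's loss."""
    for _ in range(epochs):
        # Steps 1-2: merge every task's mini-batches into one pool and shuffle,
        # so parameter updates alternate between tasks in a random order.
        merged = [(task, batch) for task, loader in task_loaders.items()
                  for batch in loader]
        random.shuffle(merged)
        for task, batch in merged:
            # Step 3: compute the loss appropriate to this mini-batch's task.
            loss = loss_fns[task](model, batch)
            if seq_loss_fn is not None:          # joint ASR-NLP training adds L_seq
                loss = loss + seq_loss_fn(model, batch, task)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
```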
|
{

"text": "For the token-level classification problem, denote the slot sequence as s, the input as v, and the sequence length as N; the negative log-likelihood loss is used to calculate the slot loss $L_{tagging} = -\\sum_{j=1}^{N} \\log p(s_j \\mid v, s_{:j-1}; \\theta)$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Slot Filling",

"sec_num": null

},

{

"text": "Summarization The summarization of x is defined as y = (y_1, . . . , y_M). The model infers an appropriate y from v. The generation loss is calculated with the negative log-likelihood $L_{gen} = -\\sum_{j=1}^{M} \\log p(y_j \\mid v, y_{:j-1}; \\theta)$. Question Answering For question answering, we employ a binary cross-entropy loss on the sentence pooling representation, $L_{has\\_key}$, and the span-based losses (Rajpurkar et al., 2016) on the sentence representation, $L_{span}$. The QA loss is $L_{qa} = L_{has\\_key} + L_{span}$.",

"cite_spans": [

{

"start": 395,

"end": 419,

"text": "(Rajpurkar et al., 2016)",

"ref_id": "BIBREF31"

}

],

"ref_spans": [],

"eq_spans": [],

"section": "Slot Filling",

"sec_num": null

},
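
A minimal sketch of the QA loss described above, L_qa = L_has_key + L_span, assuming PyTorch tensors with hypothetical shapes; the pooled-representation logit and the span logits are assumed model outputs, not the authors' code.

```python
import torch.nn.functional as F

def qa_loss(pooled_logit, start_logits, end_logits, has_answer, start_idx, end_idx):
    # pooled_logit: (batch,) logit from the sentence-level pooled representation
    # start_logits, end_logits: (batch, seq_len) scores over token positions
    # has_answer: (batch,) float in {0, 1}; start_idx, end_idx: (batch,) long targets
    l_has_key = F.binary_cross_entropy_with_logits(pooled_logit, has_answer)
    l_span = F.cross_entropy(start_logits, start_idx) + F.cross_entropy(end_logits, end_idx)
    return l_has_key + l_span
```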
|
{ |
|
"text": "Sequence Losses Inspired by reinforce framework (Prabhavalkar et al., 2018) , sequence loss training enables end-to-end joint training of ASR and a downstream language task (Rao et al., 2021) . Denote C as a joint sequence of ASR and NLP outputs, this is done by directly optimizing model parameters \u03b8 for the expected metric cost M (c, c * ) over the distribution of candidate hypotheses. Here c * is the ground-truth output and c is a model candidate. This is expressed as,", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 75, |
|
"text": "(Prabhavalkar et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 191, |
|
"text": "(Rao et al., 2021)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot Filling", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L seq = E C\u2208C [M (C, c * )] (1) \u21d2 \u2207 \u03b8 L seq = \u2207 \u03b8 E C\u2208C [M (C, c * )] (2) \u2248 \u2207 \u03b8 c\u2208Cp \u03b8 (c)M (c, c * ) (3) \u2248 c\u2208C M (c, c * )\u2207 \u03b8p\u03b8 (c).", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Slot Filling", |
|
"sec_num": null |
|
}, |
|
{

"text": "Here, the approximation of the expectation in Eq. 3 is from using an n-best candidate set $\\tilde{C}$ produced by the model, with each candidate arising from a normalized probability $\\hat{p}_{\\theta}(c) = p_{\\theta}(c) / \\sum_{c' \\in \\tilde{C}} p_{\\theta}(c')$. The probability of a candidate c is given by the combination of the ASR and language task probabilities.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Slot Filling",

"sec_num": null

},
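
A minimal sketch of the sequence loss in Eqs. (1)-(4): the n-best candidate scores are normalized into the probabilities p̂_θ(c) and weighted by the metric costs M(c, c*). The candidate scoring and the metric itself are assumed to be supplied by the caller; this is not the authors' implementation.

```python
import torch

def sequence_loss(log_probs, metric_costs):
    # log_probs: (n_best,) joint ASR+NLP log-scores of the candidate hypotheses
    # metric_costs: (n_best,) detached, non-differentiable costs M(c, c*),
    #               e.g. WER plus the relevant task's error rate
    p_hat = torch.softmax(log_probs, dim=0)   # normalize over the n-best list
    return (p_hat * metric_costs).sum()       # expected metric cost, as in Eq. (3)
```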
|
{ |
|
"text": "Sequence loss training is a policy gradient approach that jointly trains \u03b8 ASR and \u03b8 N LP by increasing the prediction probability of candidates with lower metric costs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot Filling", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this work, we optimize for a composite metric which is a sum of metrics of interest, namely, word error rate (WER) for ASR task and a language task metric. The metrics for language task include:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot Filling", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) rouge error rate for the summarization task, (2) exact match error rate and QA F1 error rate for question answering, and (3) intent and domain classification error rate as well as SLU-F1 error rate for the language understanding task. These metrics are further detailed in Sec. 4.3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot Filling", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Sequence loss training can be done for an individual task and is used in conjunction with the cross-entropy losses defined earlier that acts as a regularizing term. It can also be combined with multi-task learning by applying task-appropriate sequence loss training to update relevant parameters for a batch from the merged dataset. Table 2 : Main results of different models and settings on different datasets. BOLD BLACK numbers are in the first place for ASR and NLP settings, BOLD RED numbers are in the first place for Pipeline and jointly settings. A (\u2193) means lower is better, and (\u2191) means higher is better. a) For evaluation, we choose four typical and large generation and understanding datasets, i.e., Spoken-Gigaword, Spoken-SQuAD, ATIS and SLURP. b) For trainging settings, ASR and NLP represent two independent systems for their own tasks. Pipeline means that the output transcriptions from the pre-trained ASR system are used as the input of the pre-trained NLP system. Jointly training means that the parameters of ASR and NLP system are jointly optimized through extra sequence losses. c) For models, we use LAS for ASR system and BART for NLP system empirically. Single models (S) are treated as baselines and trained only on their own task. MTL models (M) mean that parameters are shared across four tasks and trained together. S -> S means pipeline training of LAS-S and then BART-S. S + S refers to pre-trained LAS-S and BART-S which are further jointly trained with sequence loss.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 333, |
|
"end": 340, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Slot Filling", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We perform experiments on four datasets, three of which are existing public corpora (ATIS, SLURP, Spoken-SQuAD) and one is generated by us (Spoken-gigaword).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "ATIS Airline Travel Information Systems (ATIS) (Hemphill et al., 1990; Shivakumar et al., 2019) is a widely used Spoken Language Understanding dataset for airline reservation, where the user's intent and utterance's slots are predicted given the input command.", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 70, |
|
"text": "(Hemphill et al., 1990;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 71, |
|
"end": 95, |
|
"text": "Shivakumar et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "SLURP SLURP (Bastianelli et al., 2020 ) is a recently released Spoken Language Understanding dataset. It is larger and more semantically complex compared to ATIS dataset. The SLURP is a collections of 72k audio recordings of single turn user interactions with a home assistant on 18 domains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 37, |
|
"text": "(Bastianelli et al., 2020", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Spoken-SQuAD Spoken-SQuAD (Li et al., 2018 ) is a large extraction-based Spoken Question Answering (SQA) dataset, where the answer of question is predicted given corresponding context. For the dataset, the context is in the form of speech and text, while the question and the answer are in the form of text. The transcripts of Spoken-SQuAD are collected from SQuAD benchmark dataset (Rajpurkar et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 42, |
|
"text": "(Li et al., 2018", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 383, |
|
"end": 407, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Spoken-Gigaword Spoken-Gigaword is a large summarization dataset. It is formulated as a summary generation problem, where the general headlines are generated given articles. Considering that Gigaword is abstractive summaries generation dataset with large amount of data, it can provide possibility for designing data-driven models. The transcripts of Spoken-Gigaword are collected from Gigaword (Rush et al., 2015) , the speech of Spoken-Gigaword are generated by existing TTS model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 395, |
|
"end": 414, |
|
"text": "(Rush et al., 2015)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the MTL-SLT model, we use LAS as the ASR model, where the input audio features are 64-dim log-mel filterbank features computed over a 25 ms window, with 10 ms shifts, the text is tokenized into subword tokens using a unigram language model (Kudo, 2018) of vocabulary of 4500. We use BART-base as NLP model, which has 6 encoder layers and 6 decoder layers, a hidden size of 768, filter size of 3,072, and 12 attention heads. We apply the default hyper-parameters from prior works (Rao et al., 2021; Lewis et al., 2020) including the learning rate schedule.", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 256, |
|
"text": "(Kudo, 2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 501, |
|
"text": "(Rao et al., 2021;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 521, |
|
"text": "Lewis et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "4.2" |
|
}, |
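
A minimal sketch of the front end described above, assuming torchaudio and SentencePiece as stand-in libraries and a 16 kHz sample rate (not stated in the paper): 64-dim log-mel filterbanks over 25 ms windows with 10 ms shifts, and a unigram subword model with a 4,500-token vocabulary; "transcripts.txt" is a placeholder path.

```python
import torch
import torchaudio
import sentencepiece as spm

SAMPLE_RATE = 16000                           # assumed; not specified in the paper
mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=SAMPLE_RATE,
    n_fft=512,
    win_length=int(0.025 * SAMPLE_RATE),      # 25 ms window
    hop_length=int(0.010 * SAMPLE_RATE),      # 10 ms shift
    n_mels=64,                                # 64-dim log-mel filterbanks
)

def log_mel_features(waveform):
    """waveform: (1, samples) tensor -> (frames, 64) log-mel features."""
    return torch.log(mel(waveform) + 1e-6).squeeze(0).transpose(0, 1)

# Unigram subword tokenizer (Kudo, 2018) with a 4,500-token vocabulary.
spm.SentencePieceTrainer.train(
    input="transcripts.txt", model_prefix="slt_unigram",
    vocab_size=4500, model_type="unigram",
)
```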
|
{ |
|
"text": "In this section, we show the evaluation metrics for each tasks. For extractive question answering task (Rajpurkar et al., 2016) , it is evaluated with two metrics: Exact Match (EM) to check whether the answer extracted by the model are exactly the same as the correct answer and F1 score to measure the degree of word overlap at token level. For summarization, we follow previous work (Rush et al., 2015) and use ROUGE-1 (unigrams), ROUGE-2 (bigrams), and ROUGE-L (longest-common substring) (Lin, 2004) . For ATIS dataset, we evaluate it with intent classification accuracy and slot filling F1 score (Hemphill et al., 1990; Ruan et al., 2020) . For SLURP dataset, we evaluate it with intent-domain classification accuracy and slot filling SLU-F1 score proposed in Bastianelli et al. (2020) , which does not overly penalise misalignments caused by ASR errors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 127, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 404, |
|
"text": "(Rush et al., 2015)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 491, |
|
"end": 502, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 600, |
|
"end": 623, |
|
"text": "(Hemphill et al., 1990;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 642, |
|
"text": "Ruan et al., 2020)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 764, |
|
"end": 789, |
|
"text": "Bastianelli et al. (2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Metrics", |
|
"sec_num": "4.3" |
|
}, |
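
A minimal sketch (not the official evaluation scripts) of the two question answering metrics described above: Exact Match and token-level F1 between a predicted and a gold answer string.

```python
from collections import Counter

def exact_match(prediction, gold):
    return float(prediction.strip().lower() == gold.strip().lower())

def token_f1(prediction, gold):
    pred_tokens = prediction.lower().split()
    gold_tokens = gold.lower().split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    overlap = sum(common.values())
    if overlap == 0:
        return 0.0
    precision = overlap / len(pred_tokens)
    recall = overlap / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

# Example: token_f1("in new york city", "new york") ~= 0.67
```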
|
{ |
|
"text": "Results of different models and settings on four datasets are shown in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 78, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "ASR Taking word error rate (WER) as evaluation metric, we can see that the MTL has some advantages for the ASR task. From Setting 1, MTL helps improve the performance of LAS model on ASR when pooling data across tasks. From Setting 4, when jointly training with the NLP model, the MTL setting sees better performance than independently training ASR. Comparing the S+S in Setting 4 to the LAS-S and LAS-M in Setting 1, the improvements as per ASR from jointly training are (1.91% on average) larger than from MTL (1.28% on average), we attribute this to the optimization of ASR using sequence loss training for word error rate as well as related semantic metrics, similar conclusion can be witnessed in Rao et al. (2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 702, |
|
"end": 719, |
|
"text": "Rao et al. (2021)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "NLP NLP system is different from the ASR system, in which all datasets are trained for same objective. For different NLP tasks, they share the backbone BART parameters and update their own task specific heads. From Table 2 , we can see that BART-M has improvements over all independent models on all metrics, which proves the effectiveness of MTL in NLP system. Classification tasks see larger improvements than the generation tasks. In Setting 3 and Setting 4, NLP tasks can be further improved through jointly training, which shows the potential of sequence loss training in ASR-NLP system to make the system robust to acoustic noise. In Setting 4, M+M performs better than S+S, proving the effectiveness of MTL in ASR-NLP system.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 222, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "After pre-training the ASR and NLP model in single task mode or on multiple tasks, we have two methods to jointly use them, the pipeline method that is nondifferentiable and the outputs of ASR system are directly treated as inputs of NLP system, and the jointly training method with sequence loss that is differentiable and can pass the gradient from NLP system to ASR system. From Table 2 , we can see that results of different spoken language tasks in Setting 4 are better than in Setting 3, under both of independent training models and multi-tasks training models. Also multi-task trained models always perform better than independent trained models, no matter under pipeline setting or jointly training setting showing that both these effects are orthogonal and can complement one another.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 382, |
|
"end": 389, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pipeline and Jointly Training Methods", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Comparison with Existing Works We show the comparison results of our method to previous works on SLURP and ATIS in Table 4 . Results are reported on the test set of ATIS and SLURP, as well as the development set of Spoken-SQuAD. From Table 3 , because it is a recently released large SLU dataset, there are not too much previous works that we can refer, but we still get best performance compared the existing works to our knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 122, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 241, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pipeline and Jointly Training Methods", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Trained on text NLU* (Bastianelli et al., 2020) 84.84 -NLU+ (Seo et al., 2021) 87.73 84.34 BART (Lewis et al., 2020) 88.00 85. ASR Chan et al. (2021) shows that by simply mixing multiple ASR datasets together, ASR model can perform better on each dataset, and can learn powerful transfer learning representation. Inspired by this, in our experiment, we would also like to investigate the performance change after employing multi-task training only on the experimented audio data and transcription. Specifi-", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 47, |
|
"text": "(Bastianelli et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 60, |
|
"end": 78, |
|
"text": "(Seo et al., 2021)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 116, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 127, |
|
"end": 149, |
|
"text": "ASR Chan et al. (2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models Acc SLU-F1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Trained on text Attention BiRNN (Liu and Lane, 2016) 91.10 94.20 Capsule-NLU (Zhang et al., 2019) 95.00 95.20 LIDSNet (Agarwal et al., 2021) 95.97 - SF-ID Network (E et al., 2019) 96.60 95.60 SyntacticTF 97.31 96.01 BERT SLU 97.50 96.10 Stack-Prop. (Qin et al., 2019) 96.90 95.90 Stack-Prop. + BERT (Qin et al., 2019) 97.50 96.10 ASR Error Robust SLU (Ruan et al., 2020) 97 cally, during training, only the LAS model is shared across different tasks. Results can be seen in Setting 1 row LAS-M, in Table 2 . We can see that after employing more data, LAS performs better on each dataset, which proves that it is effective to perform more data on ASR model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 52, |
|
"text": "(Liu and Lane, 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 77, |
|
"end": 97, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 140, |
|
"text": "(Agarwal et al., 2021)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 179, |
|
"text": "SF-ID Network (E et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 267, |
|
"text": "(Qin et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 317, |
|
"text": "(Qin et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 370, |
|
"text": "(Ruan et al., 2020)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 498, |
|
"end": 505, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acc F1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We can see from Table 2 that with multi-task training, BART performs better in both the text-based setting (i.e., BART) and jointly training setting (i.e., LAS-BART).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 23, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "MTL on NLP", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "With the used sequence loss ( L seq ), we can train not only the ASR model NLP model independently, but also train both of them in an end-to-end manner. We compared the models with and without L seq , and the result are shown in Table 2 . By using the L seq , we observe improvements in ASR and NLP metrics by 2-5%. Sequence loss training allows for the downstream language modelling task to be trained with potentially erroneous ASR hypotheses allowing for robustness to word errors. This also minimizes the domain shift that occurs from training (language task has the clean ground truth transcription as input) to inference (language task has ASR hypotheses as input) resulting in improved performance. Another impact of sequence loss training is that ASR is optimized for differentiable (eg. cross-entropy), non-differentiable (eg. WER) ASR losses along with arbitrary non-differentiable metrics of interest (eg. rouge scores, SLU-F1) of the downstream language task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 236, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Sequence Loss", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The post fine-tuning step described in 3.2 is important in our framework, because 1) it can eliminate differences between datasets arising from different domains; 2) the optimal performance of different datasets falls on different positions of a paretooptimal surface, post fine-tuning can solve this problem without introducing more parameters. Effect of post fine-tuning can be seen in Table 5 . ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 388, |
|
"end": 395, |
|
"text": "Table 5", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Post Fine-tuning", |
|
"sec_num": "5.3" |
|
}, |
|
|
{ |
|
"text": "We proposed a multi-task learning framework for spoken language understanding tasks that take speech as input and produces (1) intents and namedentities in language understanding tasks, (2) abstract text summaries, or (3) question answering. This framework can be extended to other language tasks such as translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this framework, we make use of pretrained ASR models and language models like BART and jointly train these layers across multiple language tasks. We demonstrate that this training across tasks coupled with task-specific post-finetuning produces significantly better results for ASR and BART separately. We made use of the sequence loss training framework to enable end-to-end training of ASR and BART to optimize for metrics of interest for the classification, sequence tagging, and generation tasks. This made the downstream language task robust to errors in ASR hypotheses that otherwise leads to performance degradation in pipelined ASR and language task systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We demonstrate state-of-the-art results on public corpora of SLURP and ATIS for spoken language understanding. We also prepare the Spoken-Gigaword dataset for abstractive summarization of speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "As a pre-trained sequence-to-sequence denoising autoencoder, BART uses a standard Transformerbased neural machine translation architecture, which consists of 6 encoder and 6 decoder segments. In our work, we attribute each tasks with task specific classification head over the BART model. Specifically, for the Intent Detection task, we use the End-Of-Sentence (EOS) token on the last decoder layer to do the prediction; for the slot filling task, we predict the slot labels in BIO format after the last encoder layer; for the summarization task, generated sentences with EOS token at end are used to calculate the summarized loss; for the question answering task, EOS token in the last decoder layer is used to predict the answer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
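
A minimal sketch of task-specific heads over a shared BART backbone as described above, assuming the Hugging Face `transformers` implementation of BART-base (which may differ from the authors' setup): the intent head reads the decoder state at the EOS position, and the slot head tags the final encoder states in BIO format.

```python
import torch
import torch.nn as nn
from transformers import BartModel

class BartWithTaskHeads(nn.Module):
    def __init__(self, n_intents, n_slot_tags):
        super().__init__()
        self.bart = BartModel.from_pretrained("facebook/bart-base")
        hidden = self.bart.config.d_model             # 768 for BART-base
        self.intent_head = nn.Linear(hidden, n_intents)
        self.slot_head = nn.Linear(hidden, n_slot_tags)

    def forward(self, input_ids, attention_mask, decoder_input_ids, eos_positions):
        out = self.bart(input_ids=input_ids, attention_mask=attention_mask,
                        decoder_input_ids=decoder_input_ids)
        # Intent detection: classify from the decoder hidden state at the EOS token.
        batch_idx = torch.arange(input_ids.size(0))
        eos_state = out.last_hidden_state[batch_idx, eos_positions]
        intent_logits = self.intent_head(eos_state)
        # Slot filling: per-token BIO logits from the final encoder layer.
        slot_logits = self.slot_head(out.encoder_last_hidden_state)
        return intent_logits, slot_logits
```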
|
{ |
|
"text": "In Section 3.3, we mention the L has_key in Spoken Question Answering. Actually, Spoken-SQuAD is a dataset with all examples having answers. However, since the input context of each example is too long, if we process the input audio directly, the model's performance will be very poor. Thus, instead of processing the input audio directly, we first split the input into sentence-wise segments, and then during the training, we predict the answer on each sentence. Note that we have a classification head to determine whether this sentence contains the answer or not, and the loss over this classification head is L has_key .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D E2E Spoken Question Answering", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "For the experimental datasets (Spoken-SQuAD, SLURP, ATIS), we follow the default train/dev/test splits from the original paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Statistics of datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We show the detailed hyperparameters for the MTL Pre-training and Post Fine-tuning stages described in Section 3.2 of the proposed method on different datasets in Table 6 . Table 6 : Hyperparameters for the Pre-training and finetuning stages in training MTL-SLT on the four datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 170, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 180, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.2 Hyperparameters", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The detail statistics of the generated Spoken-Gigaword dataset are shown in Table 7 . The articles and summarizations are acquired from gigaword headline generation dataset (Rush et al., 2015) , we then generate the speech data for the articles using Tacotron2 (Shen et al., 2018) to extract feature and . Note that because the input article is noisy, which make it hard to generate proper speech, so we remove the ones with special symbols, and we remove the articles that have more than 30 words. The implementation is based on an open source library 1 .C Model Structure of NLP task with BART Model", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 192, |
|
"text": "(Rush et al., 2015)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 280, |
|
"text": "(Shen et al., 2018)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 83, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Spoken-gigaword Dataset", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Lidsnet: A lightweight on-device intent detection model using deep siamese network", |
|
"authors": [ |
|
{ |
|
"first": "Vibhav", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sourav", |
|
"middle": [], |
|
"last": "Sudeep Deepak Shivnikar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Himanshu", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yashwant", |
|
"middle": [], |
|
"last": "Arora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Saini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vibhav Agarwal, Sudeep Deepak Shivnikar, Sourav Ghosh, Himanshu Arora, and Yashwant Saini. 2021. Lidsnet: A lightweight on-device intent detec- tion model using deep siamese network. CoRR, abs/2110.15717.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "SLURP: A spoken language understanding resource package", |
|
"authors": [ |
|
{ |
|
"first": "Emanuele", |
|
"middle": [], |
|
"last": "Bastianelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Vanzo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emanuele Bastianelli, Andrea Vanzo, Pawel Swietojan- ski, and Verena Rieser. 2020. SLURP: A spoken lan- guage understanding resource package. In EMNLP. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Multitask learning. Machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "41--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rich Caruana. 1997. Multitask learning. Machine learning, 28(1):41-75.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Listen, attend and spell: A neural network for large vocabulary conversational speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navdeep", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Chan, Navdeep Jaitly, Quoc V. Le, and Oriol Vinyals. 2016. Listen, attend and spell: A neural network for large vocabulary conversational speech recognition. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Speechstew: Simply mix all available speech recognition data to train one large neural network", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Chan, Daniel S. Park, Chris Lee, Yu Zhang, Quoc V. Le, and Mohammad Norouzi. 2021. Speech- stew: Simply mix all available speech recognition data to train one large neural network. CoRR, abs/2104.02133.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT for joint intent classification and slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhu", |
|
"middle": [], |
|
"last": "Zhuo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "CoRR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qian Chen, Zhu Zhuo, and Wen Wang. 2019. BERT for joint intent classification and slot filling. CoRR, abs/1902.10909.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Speechbert: An audio-and-text jointly learned language model for end-to-end spoken question answering", |
|
"authors": [ |
|
{ |
|
"first": "Yung-Sung", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi-Liang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yi", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin-Shan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yung-Sung Chuang, Chi-Liang Liu, Hung-yi Lee, and Lin-Shan Lee. 2020. Speechbert: An audio-and-text jointly learned language model for end-to-end spoken question answering. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "SPLAT: speech-language joint pre-training for spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Yu-An", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenguang", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu-An Chung, Chenguang Zhu, and Michael Zeng. 2021. SPLAT: speech-language joint pre-training for spoken language understanding. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "End-to-end spoken language understanding using transformer networks and self-supervised pretrained features", |
|
"authors": [ |
|
{ |
|
"first": "Edmilson", |
|
"middle": [], |
|
"last": "da Silva Morais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong-Kwang Jeff", |
|
"middle": [], |
|
"last": "Kuo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zolt\u00e1n", |
|
"middle": [], |
|
"last": "T\u00fcske", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Kingsbury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edmilson da Silva Morais, Hong-Kwang Jeff Kuo, Samuel Thomas, Zolt\u00e1n T\u00fcske, and Brian Kingsbury. 2021. End-to-end spoken language understanding using transformer networks and self-supervised pre- trained features. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT (1)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: pre-training of deep bidirectional transformers for language under- standing. In NAACL-HLT (1), pages 4171-4186. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A novel bi-directional interrelated model for joint intent detection and slot filling", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Haihong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peiqing", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongfu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meina", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haihong E, Peiqing Niu, Zhongfu Chen, and Meina Song. 2019. A novel bi-directional interrelated model for joint intent detection and slot filling. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "ASR-GLUE: A new multi-task benchmark for asr-robust natural language understanding. CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Lingyun", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianwei", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deng", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Songxiang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haitao", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lingyun Feng, Jianwei Yu, Deng Cai, Songxiang Liu, Haitao Zheng, and Yan Wang. 2021. ASR-GLUE: A new multi-task benchmark for asr-robust natural language understanding. CoRR, abs/2108.13048.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The ATIS spoken language systems pilot corpus", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Hemphill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Godfrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Doddington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles T. Hemphill, John J. Godfrey, and George R. Doddington. 1990. The ATIS spoken language sys- tems pilot corpus. In HLT. Morgan Kaufmann.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "St-bert: Cross-modal language model pre-training for end-to-end spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Minjeong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gyuwan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sang-Woo", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jung-Woo", |
|
"middle": [], |
|
"last": "Ha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7478--7482", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minjeong Kim, Gyuwan Kim, Sang-Woo Lee, and Jung- Woo Ha. 2021a. St-bert: Cross-modal language model pre-training for end-to-end spoken language understanding. In ICASSP, pages 7478-7482. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Alexandros Papangelis", |
|
"authors": [ |
|
{ |
|
"first": "Seokhwan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandros", |
|
"middle": [], |
|
"last": "Papangelis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Gopalakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2109.13489" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seokhwan Kim, Yang Liu, Di Jin, Alexandros Papan- gelis, Karthik Gopalakrishnan, Behnam Hedayatnia, and Dilek Hakkani-Tur. 2021b. \" how robust ru?\": Evaluating task-oriented dialogue systems on spoken conversations. arXiv preprint arXiv:2109.13489.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Subword regularization: Improving neural network translation models with multiple subword candidates", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo. 2018. Subword regularization: Improv- ing neural network translation models with multiple subword candidates. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An audio-enriched bert-based framework for spoken multiple-choice question answering", |
|
"authors": [ |
|
{ |
|
"first": "Chia-Chih", |
|
"middle": [], |
|
"last": "Kuo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shang-Bao", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuan-Yu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chia-Chih Kuo, Shang-Bao Luo, and Kuan-Yu Chen. 2020. An audio-enriched bert-based framework for spoken multiple-choice question answering. In IN- TERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Semi-supervised spoken language understanding via self-supervised speech and language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Cheng-I", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yung-Sung", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yi", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shang-Wen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cheng-I Lai, Yung-Sung Chuang, Hung-Yi Lee, Shang- Wen Li, and James R. Glass. 2021. Semi-supervised spoken language understanding via self-supervised speech and language model pretraining. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and com- prehension. In ACL, pages 7871-7880. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Spoken squad: A study of mitigating the impact of speech recognition errors on listening comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Chia-Hsuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Szu-Lin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi-Liang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hungyi", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chia-Hsuan Li, Szu-Lin Wu, Chi-Liang Liu, and Hung- yi Lee. 2018. Spoken squad: A study of mitigating the impact of speech recognition errors on listening comprehension. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Rouge: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Text summarization branches out", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Attention-based recurrent neural network models for joint intent detection and slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Lane", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Liu and Ian R. Lane. 2016. Attention-based recur- rent neural network models for joint intent detection and slot filling. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Multi-task deep neural networks for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weizhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodong Liu, Pengcheng He, Weizhu Chen, and Jian- feng Gao. 2019. Multi-task deep neural networks for natural language understanding. In ACL. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Text summarization with pretrained encoders", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP/IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu and Mirella Lapata. 2019. Text summariza- tion with pretrained encoders. In EMNLP/IJCNLP. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Speech Model Pre-Training for End-to-End Spoken Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Loren", |
|
"middle": [], |
|
"last": "Lugosch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirco", |
|
"middle": [], |
|
"last": "Ravanelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Ignoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vikrant", |
|
"middle": [], |
|
"last": "Singh Tomar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Loren Lugosch, Mirco Ravanelli, Patrick Ignoto, Vikrant Singh Tomar, and Yoshua Bengio. 2019. Speech Model Pre-Training for End-to-End Spoken Language Understanding. In Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The natural language decathlon: Multitask learning as question answering", |
|
"authors": [ |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Shirish Keskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1806.08730" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bryan McCann, Nitish Shirish Keskar, Caiming Xiong, and Richard Socher. 2018. The natural language decathlon: Multitask learning as question answering. arXiv preprint arXiv:1806.08730.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Librispeech: An ASR corpus based on public domain audio books", |
|
"authors": [ |
|
{ |
|
"first": "Vassil", |
|
"middle": [], |
|
"last": "Panayotov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoguo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5206--5210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vassil Panayotov, Guoguo Chen, Daniel Povey, and San- jeev Khudanpur. 2015. Librispeech: An ASR corpus based on public domain audio books. In ICASSP, pages 5206-5210. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Minimum word error rate training for attention-based sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Rohit", |
|
"middle": [], |
|
"last": "Prabhavalkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tara", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Sainath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chung-Cheng", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anjuli", |
|
"middle": [], |
|
"last": "Kannan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4839--4843", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rohit Prabhavalkar, Tara N. Sainath, Yonghui Wu, Patrick Nguyen, Zhifeng Chen, Chung-Cheng Chiu, and Anjuli Kannan. 2018. Minimum word error rate training for attention-based sequence-to-sequence models. In ICASSP, pages 4839-4843. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Speechlanguage pre-training for end-to-end spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Yao", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ximo", |
|
"middle": [], |
|
"last": "Bian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoyuki", |
|
"middle": [], |
|
"last": "Kanda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yao Qian, Ximo Bian, Yu Shi, Naoyuki Kanda, Leo Shen, Zhen Xiao, and Michael Zeng. 2021. Speech- language pre-training for end-to-end spoken language understanding. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "A stack-propagation framework with token-level intent detection for spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Libo", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangming", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoyang", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP/IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Libo Qin, Wanxiang Che, Yangming Li, Haoyang Wen, and Ting Liu. 2019. A stack-propagation framework with token-level intent detection for spoken language understanding. In EMNLP/IJCNLP.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "J. Mach. Learn. Res", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text trans- former. J. Mach. Learn. Res., 21:140:1-140:67.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Squad: 100, 000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100, 000+ questions for machine comprehension of text. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "End-to-end spoken language understanding using rnn-transducer ASR", |
|
"authors": [ |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Raju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gautam", |
|
"middle": [], |
|
"last": "Tiwari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milind", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Dheram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bach", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariya", |
|
"middle": [], |
|
"last": "Rastrow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anirudh Raju, Gautam Tiwari, Milind Rao, Pranav Dheram, Bryan Anderson, Zhe Zhang, Bach Bui, and Ariya Rastrow. 2021. End-to-end spoken language understanding using rnn-transducer ASR. CoRR, abs/2106.15919.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Ariya Rastrow, and Andreas Stolcke. 2021. DO as I mean, not as I say: Sequence loss training for spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Milind", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Dheram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gautam", |
|
"middle": [], |
|
"last": "Tiwari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Raju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasha", |
|
"middle": [], |
|
"last": "Droppo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariya", |
|
"middle": [], |
|
"last": "Rastrow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milind Rao, Pranav Dheram, Gautam Tiwari, Anirudh Raju, Jasha Droppo, Ariya Rastrow, and Andreas Stolcke. 2021. DO as I mean, not as I say: Sequence loss training for spoken language understanding. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Speech to semantics: Improve ASR and NLU jointly via all-neural interfaces", |
|
"authors": [ |
|
{ |
|
"first": "Milind", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Raju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Dheram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bach", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariya", |
|
"middle": [], |
|
"last": "Rastrow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milind Rao, Anirudh Raju, Pranav Dheram, Bach Bui, and Ariya Rastrow. 2020. Speech to semantics: Im- prove ASR and NLU jointly via all-neural interfaces. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Towards an ASR error robust spoken language understanding system", |
|
"authors": [ |
|
{ |
|
"first": "Weitong", |
|
"middle": [], |
|
"last": "Ruan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaroslav", |
|
"middle": [], |
|
"last": "Nechaev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luoxin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengwei", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Imre", |
|
"middle": [], |
|
"last": "Kiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weitong Ruan, Yaroslav Nechaev, Luoxin Chen, Cheng- wei Su, and Imre Kiss. 2020. Towards an ASR error robust spoken language understanding system. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "An overview of multi-task learning in deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder. 2017. An overview of multi-task learn- ing in deep neural networks.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "A neural attention model for abstractive sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander M. Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Alexandre Caulier, Th\u00e9odore Bluche, Thibault Gisselbrecht, and Ma\u00ebl Primet", |
|
"authors": [ |
|
{ |
|
"first": "Alaa", |
|
"middle": [], |
|
"last": "Saade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Dureau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Leroy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesco", |
|
"middle": [], |
|
"last": "Caltagirone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alice", |
|
"middle": [], |
|
"last": "Coucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrien", |
|
"middle": [], |
|
"last": "Ball", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cl\u00e9ment", |
|
"middle": [], |
|
"last": "Doumouro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thibaut", |
|
"middle": [], |
|
"last": "Lavril", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Caulier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Th\u00e9odore", |
|
"middle": [], |
|
"last": "Bluche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thibault", |
|
"middle": [], |
|
"last": "Gisselbrecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ma\u00ebl", |
|
"middle": [], |
|
"last": "Primet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMC2@NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alaa Saade, Joseph Dureau, David Leroy, Francesco Caltagirone, Alice Coucke, Adrien Ball, Cl\u00e9ment Doumouro, Thibaut Lavril, Alexandre Caulier, Th\u00e9odore Bluche, Thibault Gisselbrecht, and Ma\u00ebl Primet. 2019. Spoken language understanding on the edge. In EMC2@NeurIPS.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Integration of pre-trained networks with continuous token interface for end-to-end spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Seunghyun", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyun", |
|
"middle": [], |
|
"last": "Kwak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowon", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seunghyun Seo, Donghyun Kwak, and Bowon Lee. 2021. Integration of pre-trained networks with con- tinuous token interface for end-to-end spoken lan- guage understanding. CoRR, abs/2104.07253.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Natural TTS synthesis by conditioning wavenet on MEL spectrogram predictions", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruoming", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navdeep", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zongheng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuxuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rj-Skerrv", |
|
"middle": [], |
|
"last": "Ryan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rif", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Saurous", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Agiomyrgiannakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Shen, Ruoming Pang, Ron J. Weiss, Mike Schuster, Navdeep Jaitly, Zongheng Yang, Zhifeng Chen, Yu Zhang, Yuxuan Wang, RJ-Skerrv Ryan, Rif A. Saurous, Yannis Agiomyrgiannakis, and Yonghui Wu. 2018. Natural TTS synthesis by con- ditioning wavenet on MEL spectrogram predictions. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Spoken language intent detection using confusion2vec", |
|
"authors": [ |
|
{ |
|
"first": "Prashanth", |
|
"middle": [ |
|
"Gurunath" |
|
], |
|
"last": "Shivakumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panayiotis", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Georgiou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prashanth Gurunath Shivakumar, Mu Yang, and Panayi- otis G. Georgiou. 2019. Spoken language intent de- tection using confusion2vec. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Phoneme-bert: Joint language modelling of phoneme sequence and asr transcript", |
|
"authors": [ |
|
{ |
|
"first": "Mukuntha", |
|
"middle": [ |
|
"Narayanan" |
|
], |
|
"last": "Sundararaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayush", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jithendra", |
|
"middle": [], |
|
"last": "Vepa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mukuntha Narayanan Sundararaman, Ayush Kumar, and Jithendra Vepa. 2021. Phoneme-bert: Joint lan- guage modelling of phoneme sequence and asr tran- script. CoRR, abs/2102.00804.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. Glue: A multi-task benchmark and analysis platform for natural language understanding.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Encoding syntactic knowledge in transformer encoder for intent detection and slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Jixuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Radfar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jixuan Wang, Kai Wei, Martin Radfar, Weiwei Zhang, and Clement Chung. 2021. Encoding syntactic knowledge in transformer encoder for intent detec- tion and slot filling. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Knowledge distillation for improved accuracy in spoken question answering", |
|
"authors": [ |
|
{ |
|
"first": "Chenyu", |
|
"middle": [], |
|
"last": "You", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nuo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuexian", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenyu You, Nuo Chen, and Yuexian Zou. 2021. Knowledge distillation for improved accuracy in spo- ken question answering. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Joint slot filling and intent detection via capsule neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Chenwei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaliang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenwei Zhang, Yaliang Li, Nan Du, Wei Fan, and Philip S. Yu. 2019. Joint slot filling and intent detec- tion via capsule neural networks. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Pushing the limits of semisupervised learning for automatic speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chung-Cheng", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruoming", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Zhang, James Qin, Daniel S. Park, Wei Han, Chung- Cheng Chiu, Ruoming Pang, Quoc V. Le, and Yonghui Wu. 2020. Pushing the limits of semi- supervised learning for automatic speech recognition. CoRR, abs/2010.10504.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Different Implementations of Spoken Language Tasks.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"text": "T-SNE Visualization of BART's last hidden state features. Red and blue represent ATIS and SLURP datasets, green denotes Spoken-SQuAD dataset, purple denotes Spoken-Gigaword dataset.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"text": "seq for ASR and NLP end 4. Compute gradient: \u2207(\u03b8) 5. Update model: \u03b8 = \u03b8 \u2212 \u03f5\u2207(\u03b8) end end", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"uris": null, |
|
"text": "R1(\u2191) R2(\u2191) RL(\u2191) WER(\u2193) EM(\u2191) F1(\u2191) WER(\u2193) Acc(\u2191) F1(\u2191) WER(\u2193) Acc(\u2191) SLU-F1(\u2191)", |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td>Q1</td><td>When should I turn off bedroom light?</td></tr><tr><td>Q2</td><td>When do I go to the airport?</td></tr><tr><td/><td>Output</td></tr><tr><td colspan=\"2\">Sum. turn off the bedroom light</td></tr><tr><td colspan=\"2\">Intent hue_lightoff</td></tr><tr><td>Slots</td><td>[date : tomorrow], [time : nine thirty pm]</td></tr><tr><td/><td>[house_place : bedroom],</td></tr><tr><td colspan=\"2\">Ans1. nine thirty pm</td></tr><tr><td colspan=\"2\">Ans2. tomorrow</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "and the evolution of pre-trainedInputSpeech I am going to the airport tomorrow, please turn off bedroom light at nine thirty pm." |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td/><td/><td/><td>Ground Truth</td><td>Beam Candidate</td></tr><tr><td>ASR Loss</td><td/><td/><td>Generation Loss + Classification Loss</td></tr><tr><td colspan=\"2\">Transcriptions</td><td>Encoder</td><td>Decoder</td></tr><tr><td><eos></td><td>Sequence Loss</td><td/><td>Sum. 1 Sum. 2 \u2026 Intent 1 Intent 2 \u2026 Slots 1 Slots 2 \u2026 Ans. Sequence Loss</td></tr><tr><td>Figure 3:</td><td/><td/></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Our proposed MTL framework for LAS-BART-based Spoken Language Models. The model consists of an ASR system to generate transcription for the input audio frames, and an encoder-decoder system to generate intents, slots, answers, summarizations for different tasks. They share parameters of LAS, BART encoder and decoder, and are first trained on multiple tasks with ASR Loss, Generation Loss and Classification Loss; then the two systems are jointly trained with Sequence Loss." |
|
}, |
|
"TABREF5": { |
|
"content": "<table><tr><td>5 Analysis</td></tr><tr><td>5.1 Effect of MTL</td></tr><tr><td>MTL on</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Comparison with existing works on SLURP. NLU* represents the results from SLURP paper. NLU+ represents the results from a recently released paper." |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Comparison results on ATIS test set." |
|
}, |
|
"TABREF9": { |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Ablation study on Post Fine-tuning." |
|
}, |
|
"TABREF10": { |
|
"content": "<table><tr><td colspan=\"2\">Types</td><td>Spoken-Gigaword</td></tr><tr><td colspan=\"2\">Training Set</td><td>249199</td></tr><tr><td colspan=\"2\">Validation Set</td><td>12578</td></tr><tr><td/><td>words</td><td>119M</td></tr><tr><td/><td>uni-words</td><td>110K</td></tr><tr><td>Article</td><td>aver length</td><td>14.6</td></tr><tr><td/><td>max length</td><td>30</td></tr><tr><td/><td>min length</td><td>11</td></tr><tr><td/><td>words</td><td>31M</td></tr><tr><td/><td>uni-words</td><td>69K</td></tr><tr><td>Headline</td><td>aver words</td><td>8.3</td></tr><tr><td/><td>max length</td><td>30</td></tr><tr><td/><td>min length</td><td>2</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "https://github.com/mozilla/TTS/" |
|
}, |
|
"TABREF11": { |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Statistics of the generated Spoken-gigaword." |
|
} |
|
} |
|
} |
|
} |