|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:46:05.836332Z" |
|
}, |
|
"title": "Multi-task Learning of Spoken Language Understanding by Integrating N-Best Hypotheses with Hierarchical Attention", |
|
"authors": [ |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Cambridge", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Xinyue", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Cambridge", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Weitong", |
|
"middle": [], |
|
"last": "Ruan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Cambridge", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Soldaini", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Cambridge", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Wael", |
|
"middle": [], |
|
"last": "Hamza", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Cambridge", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Chengwei", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Cambridge", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Currently, in spoken language understanding (SLU) systems, the automatic speech recognition (ASR) module produces multiple interpretations (or hypotheses) for the input audio signal and the natural language understanding (NLU) module takes the one with the highest confidence score for domain or intent classification. However, the interpretations can be noisy, and solely relying on one interpretation can cause information loss. To address the problem, many research works attempt to rerank the interpretations for a better choice while some recent works get better performance by integrating all the hypotheses during prediction. In this paper, we follow the way of integrating hypotheses but strengthen the training mode by involving more tasks, some of which may be not in existing tasks of NLU but relevant, via multi-task learning or transfer learning. Moreover, we propose the Hierarchical Attention Mechanism (HAM) to further improve the performance with the acoustic-model features like confidence scores, which are ignored in the current hypotheses integration models. The experimental results show that compared to the standard estimation with one hypothesis, the multi-task learning with HAM can improve the domain and intent classification by relatively 19% and 37%, which are much higher than improvements with current integration or reranking methods. To illustrate the cause of improvements brought by our model, we decode the hidden representations of some utterance examples and compare the generated texts with hypotheses and transcripts. The comparison shows that our model could recover the transcription by integrating the fragmented information among hypotheses and identifying the frequent error patterns of the ASR module, and even rewrite the query for a better understanding, which reveals the characteristic of multi-task learning of broadcasting knowledge.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Currently, in spoken language understanding (SLU) systems, the automatic speech recognition (ASR) module produces multiple interpretations (or hypotheses) for the input audio signal and the natural language understanding (NLU) module takes the one with the highest confidence score for domain or intent classification. However, the interpretations can be noisy, and solely relying on one interpretation can cause information loss. To address the problem, many research works attempt to rerank the interpretations for a better choice while some recent works get better performance by integrating all the hypotheses during prediction. In this paper, we follow the way of integrating hypotheses but strengthen the training mode by involving more tasks, some of which may be not in existing tasks of NLU but relevant, via multi-task learning or transfer learning. Moreover, we propose the Hierarchical Attention Mechanism (HAM) to further improve the performance with the acoustic-model features like confidence scores, which are ignored in the current hypotheses integration models. The experimental results show that compared to the standard estimation with one hypothesis, the multi-task learning with HAM can improve the domain and intent classification by relatively 19% and 37%, which are much higher than improvements with current integration or reranking methods. To illustrate the cause of improvements brought by our model, we decode the hidden representations of some utterance examples and compare the generated texts with hypotheses and transcripts. The comparison shows that our model could recover the transcription by integrating the fragmented information among hypotheses and identifying the frequent error patterns of the ASR module, and even rewrite the query for a better understanding, which reveals the characteristic of multi-task learning of broadcasting knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In an SLU system (Tur and De Mori, 2011) , the domains and intents are usually inferred by natural language understanding (NLU) modules with the hypotheses mapped from input speech by ASR module. For each speech audio, the transferred hypothesis is the one with the highest recognition score. However, due to the unsatisfactory ASR accuracy (Xiong et al., 2018; Barker et al., 2018) , the 1-best hypothesis may contain errors. To solve the problem, there are some research works rescoring (reranking) the -best hypotheses to reduce the word error rate (WER) by dual comparison with a discriminative language model (Ogawa et al., 2018; Ogawa et al., 2019) ; or involving morphological, lexical, syntactic or confidence score features for reranking (Sak et al., 2011; Collins et al., 2005; Chan and Woodland, 2004; Peng et al., 2013; Morbini et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 40, |
|
"text": "(Tur and De Mori, 2011)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 361, |
|
"text": "(Xiong et al., 2018;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 382, |
|
"text": "Barker et al., 2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 634, |
|
"text": "(Ogawa et al., 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 635, |
|
"end": 654, |
|
"text": "Ogawa et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 765, |
|
"text": "(Sak et al., 2011;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 766, |
|
"end": 787, |
|
"text": "Collins et al., 2005;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 788, |
|
"end": 812, |
|
"text": "Chan and Woodland, 2004;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 831, |
|
"text": "Peng et al., 2013;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 832, |
|
"end": 853, |
|
"text": "Morbini et al., 2012)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In contrast to the reranking models, which predict only one hypothesis with the lowest WER and transfer that hypothesis to NLU modules, there is recently another attempt to integrate the fragmented information among the -best hypotheses by feeding all the hypotheses together to NLU modules Li, 2020) . The proposed approaches to integrating hypotheses include hypothesized text This work is licensed under a Creative Commons Attribution 4.0 International License. License details: http: //creativecommons.org/licenses/by/4.0/.", |
|
"cite_spans": [ |
|
{ |
|
"start": 291, |
|
"end": 300, |
|
"text": "Li, 2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We use ASR -best hypotheses or -bests to denote the top interpretations of a speech and the 1-best or 5-best stands for the top 1 or 5 hypotheses. The hypotheses are ranked by the associated confidence scores. concatenation (Combined Sentence) and hypotheses embedding concatenation (PoolingAvg and Pooling-Max) . Compared to the accuracy on the oracle reranking results (i.e., picking the hypothesis most similar to transcription), the PoolingAvg achieves much higher improvements for the NLU tasks. However, the integration framework can be further improved by introducing more tasks with Multi-Task Learning (MTL) or Transfer Learning and involving more features to optimize the integration process.", |
|
"cite_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 311, |
|
"text": "(PoolingAvg and Pooling-Max)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "MTL (Zhang and Yang, 2017; Liu et al., 2019; Caruana, 1997) is a widely used machine learning paradigm for simultaneously training related tasks. In the MTL training, one task can apply the knowledge learned from others. MTL can improve the generalization of the trained model by avoiding overfitting to a single task and make full use of all the labeled data from all tasks to solve the issue of insufficient training data. The MTL has been shown efficient for some natural language processing tasks outside the SLU system like text similarity, pairwise text classification (Liu et al., 2019) . In contrast to multi-task learning, by transfer learning or domain adaption (Pan and Yang, 2009; Howard and Ruder, 2018; Torrey and Shavlik, 2010) , some tasks (source tasks) can be trained in the first stage knowing nothing about the other tasks (target tasks). While in the second stage, the embeddings from pre-trained model are fine-tuned according to the target mission. The transfer learning cares more about the target tasks. Some popular fine-tunable pre-trained models like BERT (Devlin et al., 2018) , ELMO (Peters et al., 2018) nail down the transfer learning in NLP.", |
|
"cite_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 26, |
|
"text": "(Zhang and Yang, 2017;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 27, |
|
"end": 44, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 45, |
|
"end": 59, |
|
"text": "Caruana, 1997)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 593, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 681, |
|
"end": 692, |
|
"text": "Yang, 2009;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 693, |
|
"end": 716, |
|
"text": "Howard and Ruder, 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 717, |
|
"end": 742, |
|
"text": "Torrey and Shavlik, 2010)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1084, |
|
"end": 1105, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1113, |
|
"end": 1134, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows. Sec. 2 presents various models and training paradigms explored in this work. Sec. 3 describes our experimental details, results and analysis. Sec. 4 concludes our findings and discusses the future directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We start by reviewing different categories of SLU system designs in Sec. 2.1. Those designs have achieved great success, but they are trained solely on one task and cannot borrow the knowledge from some relevant tasks like transcription reconstruction. To involve more tasks during training, we explore two paradigms to: 1) train them simultaneously in a single stage (Sec. 2.2), which is actually multi-task learning; 2) train them asynchronously (Sec. 2.3) in multiple stages, which includes two ways of using the pre-trained model from the first stage (transfer learning or text generation). In Sec. 2.4, we illustrate the importance of acoustic-model features and the way of utilizing them hierarchically. Figure 1 : The pipelines of current SLU systems with various ways to exploit hypotheses (with the Oracle).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 710, |
|
"end": 718, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We firstly review the current designs for the SLU system in Figure 1 , which include the standard pipeline (Baseline), Reranking and Integration models. In production, the input audio is transcribed by ASR to get -best hypotheses. Then, the Baseline model will take the one with the highest confidence score for NLU tasks. Nevertheless, the Reranking models do not solely rely on the confidence scores generated by the ASR module. They prefer to rescore the interpretations based on more features like semantic information and choose the most reliable one. Both Baseline and Reranking models transfer one sentence to the NLU module for classification. However, some recent works like indicate this causes information loss and attempt to use all the hypotheses during classification. They embed each hypothesis to one vector and unify the vectors to one by a pooling layer, which becomes the input to the NLU task. Ideally, we can make the hypothesis close to the transcribed sentence by humans. To know the ceiling point of performance, there is always the Oracle model predicting with human transcriptions. Figure 2 : The architecture of multi-task training in a single stage or different stages. The left side is training all tasks (TR, DC, IC) in the same stage while the right side is to train TR firstly and fine-tune or generate texts base on the pre-trained TR model for DC and IC.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 68, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1108, |
|
"end": 1116, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Standard SLU, Reranking, Integration And Oracle", |
|
"sec_num": "2.1" |
|
}, |
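
{

"text": "To make the Oracle behavior concrete, the following is a minimal sketch (our own illustration, not the production system; the function names edit_distance and oracle_pick are assumptions) of picking the hypothesis closest to the human transcription by word-level edit distance:\n\ndef edit_distance(a, b):\n    # classic dynamic-programming Levenshtein distance over word lists\n    dp = [[i + j if i * j == 0 else 0 for j in range(len(b) + 1)] for i in range(len(a) + 1)]\n    for i in range(1, len(a) + 1):\n        for j in range(1, len(b) + 1):\n            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1,\n                           dp[i - 1][j - 1] + (a[i - 1] != b[j - 1]))\n    return dp[len(a)][len(b)]\n\ndef oracle_pick(hypotheses, transcription):\n    # return the n-best hypothesis most similar to the transcription\n    return min(hypotheses, key=lambda h: edit_distance(h.split(), transcription.split()))\n\nprint(oracle_pick(['play news', 'play muse'], 'play muse'))  # -> 'play muse'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Standard SLU, Reranking, Integration And Oracle",

"sec_num": "2.1"

},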
|
{ |
|
"text": "Although the current approaches have gained a large improvement, their training is exclusive for one task each time and overlooks the knowledge from other tasks, which can be improved by considering more relevant tasks simultaneously with MTL. The left side of Figure 2 shows the design of training multiple tasks (transcription reconstruction, domain classification and intent classification) simultaneously for integrating -bests. The lower layers are shared and the top two layers are task-specific.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 269, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Shared Layers: In shared layers, the input = { 1 , ... }, are hypotheses generated by the ASR module for one speech. To decrease the embedded vocabulary size, the hypothesis is split to subword units (byte of pairs or BPs) in 1 by a byte pair encoder (Sennrich et al., 2015) and each BP is embedded to a vector in 2 . Then, the BiLSTM encoder gets contextualized representations for the BPs ( ,1 ... , ) of the hypothesis ( ) containing byte pairs as follow:", |
|
"cite_spans": [ |
|
{ |
|
"start": 251, |
|
"end": 274, |
|
"text": "(Sennrich et al., 2015)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "(\u210e ,1 , ..., \u210e , ) \u2190 ( ,1 , ..., , ).", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Each hidden state is the concatenation of the forward and backward directions, e.g.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "[\u210e ,1 , \u210e ,1 ],", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where means forward and means backward. The finally utilized output state for is the concatenation of the last hidden state of the forward and backward LSTM, i.e. \u210e = [\u210e ,1 , \u210e , ]. To integrate the output states of all hypotheses, we follow the empirically best approach in , PoolingAvg, which firstly pads into the output state of the first best hypothesis by \u2212 times when the amount of hypotheses, is smaller than . Then, a unified representation \u210e can be achieved by average pooling ( by 1 sliding window and stride 1) for the output states in layer 4 . In the PoolingAvg, the unified representation is used to predict the domain or intent and all the parameters are trained by the cross entropy loss for the classification task. While in our method, we introduce a new task and train tasks simultaneously. Below, we discuss the task specific layers and the training objective.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
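
{

"text": "As a minimal PyTorch-style sketch of the shared layers (our own illustration with assumed sizes, e.g. a 10000-entry BP vocabulary; not the authors' code): each hypothesis' BPs are embedded and encoded by the BiLSTM, the per-hypothesis state concatenates the final forward and backward states, missing hypotheses are padded with the 1-best's state, and average pooling yields h_unified.\n\nimport torch\nimport torch.nn as nn\n\nemb = nn.Embedding(10000, 128)  # layer 2: 128-d byte-pair embeddings (vocabulary size assumed)\nencoder = nn.LSTM(128, 512, bidirectional=True, batch_first=True)  # layer 3: BiLSTM encoder\n\ndef encode_hypothesis(bp_ids):  # bp_ids: LongTensor of shape (1, k_i)\n    out, (h_n, _) = encoder(emb(bp_ids))  # h_n: (2, 1, 512), final forward/backward states\n    return torch.cat([h_n[0], h_n[1]], dim=-1)  # (1, 1024) hypothesis state h_i\n\ndef pooling_avg(hypotheses_bp_ids, n=5):\n    states = [encode_hypothesis(h) for h in hypotheses_bp_ids]\n    states += [states[0]] * (n - len(states))  # pad with the 1-best's state when m < n\n    return torch.stack(states, dim=1).mean(dim=1)  # layer 4: h_unified of shape (1, 1024)\n\nh_unified = pooling_avg([torch.randint(0, 10000, (1, 7)), torch.randint(0, 10000, (1, 6))])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)",

"sec_num": "2.2"

},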
|
{ |
|
"text": "Transcription Reconstruction (TR): For all the natural language understanding tasks, it is important to obtain a high-quality unified representation of the incoming utterance. To assure the quality of \u210e , we consider the task to reconstruct transcription by an LSTM decoder adopting \u210e as the initial state of its first recurrent layer. Once the decoder's output is close to transcription, it shows the representation contains the high-quality information of transcription. The task is trained based on cross entropy loss:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "L \u2212 = | | =1 | | =1 , (1/\u02c6 , ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "( 2)The is the transcription while the represents the \u210e byte pair inside . The represents the \u210e byte pair in the vocabulary. Each time,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": ", is 1 when the \u210e byte pair is the \u210e entry of vocabulary and 0 otherwise.\u02c6 , is the predicted probability that the \u210e byte pair should appear at \u210e position. With the transcription reconstruction, the model can learn some erroneous patterns between the best hypotheses and target transcriptions and recover accordingly. For example, one phrase may always be mis-recognized as another phrase by an ASR module. During our evaluation, with a set of utterance examples, we decode the hidden states and show the recovering capability.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Domain Classification (DC): With the same output hidden state, we could as well predict the domains (e.g. music, weather or knowledge) by a multilayer perceptron (MLP) (Mather and Tso, 2016) module. The loss for the DC task is:", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 190, |
|
"text": "(Mather and Tso, 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L \u2212 = | | =1 , (1/\u02c6 , ).", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The , is the indicator function which equals to 1 when the utterance belongs to the \u210e domain of the candidates set . The\u02c6 , is the predicated probability,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u02c6 , = ( \u2212 (\u210e )),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where the \u2212 contains the parameters to be trained in DC task. Intent Classification (IC): Then, we could further utilize \u210e for domain-specific intent prediction with another MLP module. For an incoming utterance, it is usually firstly classified to one domain and the intent classification will be domain-specific (Tur and De Mori, 2011) . The loss of the IC task is:", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 337, |
|
"text": "(Tur and De Mori, 2011)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L \u2212 = | | =1 , (1/\u02c6 , ),", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where the , is 1 when the utterance should be classified to the \u210e intent. The\u02c6 , = softmax( \u2212 (\u210e unified )), where\u02c6 , is the predicted probability of the utterance belonging to the \u210e intent and \u2212 contains the task-specific parameters. Training Objective: For the PoolingAvg method, the objective is to minimize the L \u2212 or L \u2212 , while for our MTL framework, the objective is minimizing", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L = L \u2212 /| | + L \u2212 + L \u2212 ,", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where the , and are the weights of the loss functions associated with corresponding tasks. Since for one utterance, the target transcription contains multiple words and the L \u2212 is the sum of loss for all the words, we utilize the normalized version of the transcription reconstruction loss.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
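
{

"text": "Concretely, the multi-task objective in Formula (5) is a weighted sum of three cross-entropy losses with the TR term normalized by the transcription length; a hedged sketch (assumed logits and weights, not the authors' code) is:\n\nimport torch.nn.functional as F\n\nalpha, beta, gamma = 1.0, 1.0, 1.0  # loss weights; a 1:1 DC/TR ratio is used for the reported DC runs\n\ndef mtl_loss(tr_logits, tr_targets, dc_logits, dc_target, ic_logits, ic_target):\n    # tr_logits: (T, |V|) decoder outputs over the transcription's T byte pairs\n    l_tr = F.cross_entropy(tr_logits, tr_targets, reduction='sum') / tr_targets.numel()  # normalized TR loss\n    l_dc = F.cross_entropy(dc_logits, dc_target)  # domain classification loss\n    l_ic = F.cross_entropy(ic_logits, ic_target)  # intent classification loss\n    return alpha * l_tr + beta * l_dc + gamma * l_ic",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)",

"sec_num": "2.2"

},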
|
{ |
|
"text": "The Language Model: During our experiments, we also tried the multi-layer transformer (Vaswani et al., 2017) for the encoder. We find it costs more for training or evaluation while bringing no improvements. In addition, since the length of hypotheses varies, it is hard to align variant-length output states of different -best hypotheses and exploit the attention between encoder and decoder (if we also use the Transformer's decoder) for the TR task. Thus, we exploit the BiLSTM encoder and LSTM decoder.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 108, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning: Training Tasks Simultaneously in A Single Stage (MT_S)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Another way to train the above-mentioned tasks is in different stages as shown in the right part of Figure 2 . Inasmuch as for all the NLU tasks, it is necessary to obtain a high-quality hypothesis representation. We prioritize the training of TR in the first step and let all NLU models share the same pre-trained TR model. The approaches of exploiting the pre-trained model are introduced as follows. Prob(ASR 1st is not the best)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 109, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi-task Training in Multiple Stages (MT_M) with Transfer Learning or Text Generation", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Confidence Score Rel Diff (/5%) Figure 5 : Confidence score relative difference (bin size 5%)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 40, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi-task Training in Multiple Stages (MT_M) with Transfer Learning or Text Generation", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Transfer Learning: One way to use the TR task from the first step is transfer learning, where we regard the TR as the pre-training step and let the DC, IC tasks adapt the knowledge by fine-tuning. We call the method following this idea as Transfer Learning (TL). The parameters of the pre-trained TR's shared layers, including the embedding of byte pairs and the BiLSTM encoder, are used as the initial value. The TR task-specific parameters like the decoder part's are discarded. Then, the shared layers' parameters and task-specific layer's parameters, in \u2212 or \u2212 , are all trained during the fine-tuning step. Although DC and IC share the same initialization parameters, their fine-tuned models are separate. The benefit of the two-step training is that the model and knowledge from pre-training step can serve multiple downstream tasks. In addition, with the well-initialized parameters, it saves much time for fine-tuning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Training in Multiple Stages (MT_M) with Transfer Learning or Text Generation", |
|
"sec_num": "2.3" |
|
}, |
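
{

"text": "A hedged sketch of the TL second stage (module names, sizes and the checkpoint layout are our assumptions): the pre-trained BP embedding and BiLSTM encoder initialize the shared layers, the TR decoder is dropped, and a fresh task head is fine-tuned together with the shared layers.\n\nimport torch\nimport torch.nn as nn\n\ndef build_finetuned_dc_model(pretrained_state, n_domains=23):\n    emb = nn.Embedding(10000, 128)\n    encoder = nn.LSTM(128, 512, bidirectional=True, batch_first=True)\n    emb.load_state_dict(pretrained_state['emb'])  # reuse shared-layer parameters from the TR stage\n    encoder.load_state_dict(pretrained_state['encoder'])  # the TR decoder weights are simply discarded\n    dc_head = nn.Sequential(nn.Linear(1024, 256), nn.ReLU(), nn.Linear(256, n_domains))  # new task head\n    params = list(emb.parameters()) + list(encoder.parameters()) + list(dc_head.parameters())\n    return emb, encoder, dc_head, torch.optim.Adam(params)  # all parameters are updated while fine-tuning",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-task Training in Multiple Stages (MT_M) with Transfer Learning or Text Generation",

"sec_num": "2.3"

},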
|
{ |
|
"text": "MT_M with Text Generation: Since the TR model has been tuned to recover the errors contained in the ASR -bests, we can firstly evaluate it to generate the text closer to transcription. Then, the domain or intent can be predicted based on the generated text. This method is called Multi-task Multi-stage with Text Generation (MMTG). At this moment, the input to DC or IC is only one generated hypothesis instead of -bests. To predict with one hypothesis, we can exploit the IC or DC models pre-trained on transcription or 1-best, which only expect one sentence as the input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Training in Multiple Stages (MT_M) with Transfer Learning or Text Generation", |
|
"sec_num": "2.3" |
|
}, |
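
{

"text": "MMTG can be read as a two-step pipeline; a hedged sketch (generate_text and classify_1best are placeholders for the pre-trained TR decoder and a single-sentence DC/IC model) is:\n\ndef mmtg_predict(n_best_hypotheses, generate_text, classify_1best):\n    rewritten = generate_text(n_best_hypotheses)  # step 1: decode a transcription-like sentence from the n-bests\n    return classify_1best(rewritten)  # step 2: predict the domain or intent from that single sentence",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-task Training in Multiple Stages (MT_M) with Transfer Learning or Text Generation",

"sec_num": "2.3"

},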
|
{ |
|
"text": "All the above algorithms treat the input hypotheses as normal natural language to process but ignore that the hypotheses are generated by ASR and associated with more acoustic-model information than the text itself. For example, the position information (whether the hypothesis is the first best or the last best), the difference of confidence score associated with the first best and second best hypothesis, etc. The acoustic-model features have been proven to be valuable for many applications including: 1) the arbitration task to select the best among client and service recognition results (Kumar et al., 2015) , 2) the Recognizer Output Voting Error Reduction (ROVER) (Fiscus, 1997), which takes the outputs generated by multiple ASR systems to generate one output with reduced error rate, 3) confidence normalization (Kumar et al., 2014) , etc. In this section, we would like to introduce those features, why they can be helpful, and how they can hierarchically take part in the shared layers in the left side of Figure 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 595, |
|
"end": 615, |
|
"text": "(Kumar et al., 2015)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 824, |
|
"end": 844, |
|
"text": "(Kumar et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1020, |
|
"end": 1028, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention on Byte Pair Embedding and Hypothesis Integration Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Those features can be divided into three categories including confidence-score features, positional information and confusibility. We illustrate their close relationship with the hypothesis quality as following.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic-model Information", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "(a) Confidence-score Features: The confidence scores quantitatively represent the correctness of recognized hypotheses and words in a [0\u2030,1000\u2030] range. Plenty of previous research works have proven the effectiveness of those features. Here, we take the confidence score of hypothesis as an example to show the valuable information contained in confidence scores. For each utterance in the training set, we evaluate the probability that ASR 1-best is not the best for different scales of the ASR 1-best confidence score. A hypothesis is the best when it is the most similar one to the transcription considering the edit distance. In Figure 3 , it is obvious that a higher confidence score means a higher probability that the ASR 1-best is the best one among hypotheses.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 632, |
|
"end": 640, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acoustic-model Information", |
|
"sec_num": "2.4.1" |
|
}, |
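
{

"text": "The statistic behind Figure 3 can be computed as in the following hedged sketch (the input layout is our assumption): for each confidence bin, count how often the 1-best is not the hypothesis closest to the transcription.\n\nfrom collections import defaultdict\n\ndef prob_1best_not_best(examples, bin_size=0.05):\n    # examples: (1-best confidence, flag whether the 1-best is closest to the transcription by edit distance)\n    counts, not_best = defaultdict(int), defaultdict(int)\n    for conf_1best, is_best in examples:\n        b = int(conf_1best / bin_size)  # confidence bin\n        counts[b] += 1\n        not_best[b] += (not is_best)\n    return {b: not_best[b] / counts[b] for b in counts}\n\nprint(prob_1best_not_best([(0.95, True), (0.93, True), (0.40, False), (0.42, True)]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Acoustic-model Information",

"sec_num": "2.4.1"

},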
|
{ |
|
"text": "(b) Positional Information: The ranking position of the hypothesis is another important information. To show its significance, we gain the distribution of exact matchings, i.e. the hypothesis is the same as the transcription, between different ranking positions and the transcription. Among all the exact matching cases, 50% appear at the first best hypothesis while 19%, 13%, 10% and 6% occur at the 2 , 3 , 4 \u210e , and 5 \u210e best hypothesis. Hence, a more forward position does indicate a higher recognition quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic-model Information", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "(c) Confusibility: The features of the confusibility category include the difference ( 1 \u2212 ) and relative difference ( / 1 ) of confidence score between the ASR 1-best and the others. The larger difference implies the lower confusibility to choose the first hypothesis as the best. As the confidence score of the first best should be larger or equal to the others, the difference and relative difference are non-negative values measuring the degree of outperforming. In the Figure 4 and 5, there is a trend that the larger difference (between ASR first and second best) implies the lower probability that ASR 1-best is not the best, which means it is easier to determine the ASR 1-best as the best. Here, we only show the difference to the second best as an example. While in later designs, the features will be formed based on the difference between each -best's confidence score and the 1-best's.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 474, |
|
"end": 482, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acoustic-model Information", |
|
"sec_num": "2.4.1" |
|
}, |
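
{

"text": "Putting the three categories together, each hypothesis can be summarized by a small feature vector; a hedged sketch (the feature ordering is our choice) is:\n\ndef acoustic_features(rank, conf_i, conf_1best):\n    # rank: position in the n-best list (0 for the 1-best); conf_i: this hypothesis' confidence score\n    diff = conf_1best - conf_i  # non-negative: the 1-best never scores lower\n    rel_diff = diff / conf_1best if conf_1best else 0.0\n    return [float(rank), conf_i, diff, rel_diff]\n\nprint(acoustic_features(rank=1, conf_i=0.71, conf_1best=0.80))  # features of the 2nd-best hypothesis",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Acoustic-model Information",

"sec_num": "2.4.1"

},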
|
{ |
|
"text": "We have shown that the acoustic-model information reveals the quality of recognition and to exploit them, we add them into shared layers hierarchically. The HAM is proposed by the hierarchical structure of the -bests (BPs from a hypothesis, a hypothesis from -bests). Similar hierarchical structures are realized in different areas, where various kinds of information like documentation (Yang et al., 2016) , knowledge graphs (Hu et al., 2015) , Internet network (Li et al., 2018) , or voice queries (Rao et al., 2017) are encoded. While integrating -bests, the process is building representation for one hypothesis from BPs and then aggregating them into an -bests representation. We likewise exploit the acoustic-model information hierarchically to BPs embedding (HAM_BP) and then to the aggregation of hypotheses (HAM_H). The HAM_ALL exploits the information in both layers. Figure 6 right side, we show the way of involving the byte pair acoustic-model information in the byte pair embedding layer of Figure 2 left side. Instead of concatenating the last hidden state of forward and backward LSTM for hypothesis embedding ( Figure 6 left side), we would like to consider the quality of each byte pair and take into account the entire sequence of hidden states. To exploit the information, we firstly need to figure out the problem of missing acoustic-model information for byte pairs because we only have the confidence scores associated with words. Since it can be ensured that each byte pair only belongs to one word, we can assign the confidence score for a byte pair according to its parent, i.e. the word. For example, in Figure 6 , the \"low\" and \"-er\" are two byte pairs of the word \"lower\", so they share the same confidence score of the word \"lower\", i.e. 0.9. To use the confidence scores as attention scores, we can normalize them by Softmax or convert them to bin value or logarithmic scale, etc (shown by ( ) in Figure 6 ). The attention score matrix is multiplied with the hidden vectors matrix of the BiLSTM embedding, where each hidden vector concatenates its forward and backward states. The mean (from pooling) of weighted hidden state vectors forms a single vector for each hypothesis and the vector will participate in the following hypotheses integration layer. Figure 7 illustrates the integration of hypotheses with their associated acoustic-model information. The left side of the dotted line in Figure 7 is the way to integrate without the acoustic-model features in Figure 2 layer 4 , where hypotheses embeddings transferred from the 3 layer are combined by the pooling operation. The problem is that the normal pooling layer treats hypotheses equally, although the quality of the -best hypotheses actually varies. We add a Multiple Layer Perceptron (MLP), i.e. Feedforward (FFW), to synthesize all the features revealing the quality of each hypothesis, including the positional information, confidence score, confidence score difference and confidence score relative difference. The output of FFW is normalized via Softmax and works as the attention scores or weights. In MatMul, we multiply the attention score matrix and the hypothesis embedding matrix. Finally, the weighted embeddings are combined through pooling. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 387, |
|
"end": 406, |
|
"text": "(Yang et al., 2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 443, |
|
"text": "(Hu et al., 2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 480, |
|
"text": "(Li et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 518, |
|
"text": "(Rao et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 878, |
|
"end": 886, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1005, |
|
"end": 1013, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1128, |
|
"end": 1137, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1632, |
|
"end": 1640, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1929, |
|
"end": 1937, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2287, |
|
"end": 2295, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 2424, |
|
"end": 2432, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 2496, |
|
"end": 2504, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention Mechanism (HAM)", |
|
"sec_num": "2.4.2" |
|
}, |
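
{

"text": "The hypothesis-integration attention (HAM_H) can be sketched as below (a hedged PyTorch illustration with assumed dimensions, not the authors' code): an FFW maps each hypothesis' acoustic-model features to a score, Softmax turns the scores into attention weights, and the weighted hypothesis embeddings are pooled into h_unified.\n\nimport torch\nimport torch.nn as nn\n\nffw = nn.Sequential(nn.Linear(4, 32), nn.Tanh(), nn.Linear(32, 1))  # scores a hypothesis from its 4 features\n\ndef ham_h(hyp_embeddings, acoustic_feats):\n    # hyp_embeddings: (n, 1024); acoustic_feats: (n, 4) = [rank, confidence, difference, relative difference]\n    scores = ffw(acoustic_feats)  # (n, 1)\n    weights = torch.softmax(scores, dim=0)  # attention weights over the n hypotheses\n    weighted = weights * hyp_embeddings  # MatMul/broadcast step: (n, 1024)\n    return weighted.mean(dim=0)  # pooled h_unified: (1024,)\n\nh_unified = ham_h(torch.randn(5, 1024), torch.rand(5, 4))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hierarchical Attention Mechanism (HAM)",

"sec_num": "2.4.2"

},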
|
{ |
|
"text": "Our data consists of \u223c 9M anonymized English utterances. The utterances are divided into training, development and testing parts with 8:1:1 ratio. They are annotated with 23 domains and further classified into different intents for each domain. The transcripts are hand-transcribed by humans.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The compared approaches include the Baseline model and Oracle model mentioned in Sec. 2.1, PoolingAvg, Oracle of Reranking Model and the approaches mentioned in this paper. The PoolingAvg is the foremost one among all the models integrating -best hypotheses . The Oracle of Reranking Model makes prediction by the hypothesis most similar to the transcription each time. As for the models in this paper, they include the multi-task training in a single stage (MTL, Sec. 2.2), or in different stages (TL and MMTG, Sec. 2.3) and the HAM (Sec. 2.4). The HAM actually modifies the shared layers and is not task-specific, so it is possible to combine it with the MTL or transfer learning mechanism. For example, MTL means using HAM to modify shared layers and training with mode of MTL. For all the models, the byte pairs are embedded to a 128-dimensional space. The hidden states in the BiLSTM encoder of the shared layers and the LSTM decoder of task TR are both 512-dimensional. The training iterator is a fixed mini-batch iterator with size 128 and each model is trained for ten epochs, while the model providing the highest performance for the development data is selected. Table 1 compares the domain classification performance of all the models. As seen from the results of the entire test set, the transfer learning, multi-task learning and the improved versions with HAM are all better than the existing methods. Among the ways of training multiple tasks synchronously or asychronously, the MTL works the best. In our experiments, we have tried different hyperparametric values in formula 5. Since the predictions of IC is based on the specific domain predicted in DC, we only have two tasks (DC, TR) and associated hyperparametrics ( , ) for MTL in the domain classification. We tried ratios of : with 1 : , \u2208 1...10. Here, we assign the weight for DC as a larger one because we care more about the DC task performance, while the TR is actually an auxillary task. Through experiments, we find the performance for ratio 1 : , \u2208 1...3 is comparable to each other and better than the rests and we show the results for ratio 1 : 1 in Table 1 . With the acoustic-model information, the performance of MTL is further improved. Exploiting the acoustic-model information hierarchically on both hypotheses and byte pairs layers, i.e. HAM_ALL, is better than on one layer (HAM_H, HAM_BP).", |
|
"cite_spans": [ |
|
{ |
|
"start": 498, |
|
"end": 511, |
|
"text": "(TL and MMTG,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 512, |
|
"end": 521, |
|
"text": "Sec. 2.3)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1173, |
|
"end": 1180, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 2134, |
|
"end": 2141, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset and Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To reveal the reason of improvements, we split the entire test set into two parts by whether the 1-bests agree with transcriptions or not and evaluate respectively. Comparing the Agree and Disagree part, we find that the gained improvements of models in MTL, TL and MTL with HAM mainly come from the disagreed part. This indicates that integrating more hypotheses could help more when 1-best differs from transcription. Later, we will illustrate the reasons more visually with some utterance examples. We use the well-trained MTL on some utterance examples to predict their domains and decode the integrated hypotheses embedding (hidden representations) with Beam Search Decoder (beam size 1) to compare the generated text with the ASR hypotheses, transcription. In Table 2 , the first three columns are the predicted domains of Baseline model , MTL model (predicted by the hidden representation) and the Real domain. The other columns compare the transcriptions, the decoder's generated text in MTL and the -bests. Due to space limit, we only choose three typical example with their top 3 hypotheses. The number of hypotheses varies, so we use None for the missing one.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 766, |
|
"end": 773, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Domain Classification", |
|
"sec_num": "3.2" |
|
}, |
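
{

"text": "The decoding used for this analysis (beam size 1) reduces to greedy decoding from the integrated representation; a hedged sketch (decoder_step and the special token ids are assumptions) is:\n\nimport torch\n\ndef greedy_decode(h_unified, decoder_step, bos_id=1, eos_id=2, max_len=30):\n    state, token, out = h_unified, bos_id, []\n    for _ in range(max_len):\n        logits, state = decoder_step(token, state)  # one LSTM-decoder step, initialized from h_unified\n        token = int(torch.argmax(logits, dim=-1))\n        if token == eos_id:\n            break\n        out.append(token)\n    return out  # byte-pair ids of the generated text",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Domain Classification",

"sec_num": "3.2"

},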
|
{ |
|
"text": "Why are we better than Baseline on Disagree Part? The reasons MTL can outperform the Baseline model's prediction on Disagree Part can be categorized into two as follows. 1) Choose the best from ASR 2-bests (e.g. the third row in Table 2 ): In this condition, there is a high-quality hypothesis (\"play muse\") within the -bests. The position of the hypothesis is not the first one but we can correctly identify it in the generated text. 2) Integrate fragmented information (e.g. the forth row in Table 2 ): In this condition, the transcription \"harry porter\" spread out over hypotheses. The \"harry\" is in the third best while the \"porter\" is in the first best. We can collect the information and recover the \"harry porter\" in the generated text. The ability of integration can thus been shown. The ability can be obtained by learning the error patterns between ASR hypotheses and transcriptions during the TR task. Why are we even better than Baseline on Agree Part? The transcriptions should be the golden information but we can still outperform the Baseline's prediction from ASR 1-best when ASR 1-best agrees with the transcription. The reason is Query rewriting (e.g. the fifth row): We find the trained model attempt to rewrite transcription when it may cause misunderstanding. In the fifth row, while the transcription is \"how the call service work\", the trained model replaces the sensitive word \"call\" with another word \"remote\" with similar meaning or embedding position. The word \"call\" is a sensitive word because it always occurs in the Communication domain, which can make the predictor mis-classify it. However, the word \"remote\" is not sensitive but semantically similar to the word \"call\". This example is also a perfect demo to show the effect of multi-task learning. The multi-task learning here is to find the balance point between the domain classification and transcription reconstruction. Considering both tasks will propel the model to rewrite a query with a similar semantic meaning and avoid misunderstanding.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 236, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 501, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Real Effect of MTL: Analysis of Utterance Examples", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We summarize the different causes of improvements by some utterance examples here to offer an insight of the model's real effect. However, we do not show the numerical analysis like WER because it is hard to evaluate whether the decode generation is high-quality considering the query rewriting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Real Effect of MTL: Analysis of Utterance Examples", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Now, we compare more specifically the MTL, MTL _ and PoolingAvg on 8 important domains out of the whole 23 domains in Figure 8 . The performance of each of the three models will be compared to the baseline model and the relative error reduction (RErr) is shown. This result shows that the MTL gains more improvements than the best integration model PoolingAvg for all the 8 domains while the HAM can enhance the performance on almost all 8 domains (except an acceptable decay for Shopping).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 126, |
|
"text": "Figure 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Improvements on Different Domains and Different Numbers of Hypotheses", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "All the previous results of models based on -best actually utilize 5-best hypotheses and we also want to see the performance with different number of hypotheses. In Figure 9 , we could find the best model is always MTL _ for different numbers of utilized hypotheses. There is also a trend that after 4 hypotheses are utilized the growth become more gentle. The lines for Baseline and UpperBound are flatten because they are only based on ASR 1-bests and transcriptions. We only show the performance until 5 hypotheses are utilized because: 1) Most of our ASR recognition results only contain at most 5-bests; 2) In production, the more hypotheses are utilized, the slower it will be for training and testing. We only want to afford up to 5 hypotheses considering response delay.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 173, |
|
"text": "Figure 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Improvements on Different Domains and Different Numbers of Hypotheses", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Another task, intent classification, is domain specific and we show the IC of 3 important domains. Table 3 shows the relative error reduction compared to Baseline model. The multi-task learning for intent classification considers both the intent classification and transcription reconstruction. The result showed is under the loss ratio : = 1:1 for the two tasks. We can find the MTL _ and MTL outperforms the foremost PoolingAvg for all three domains' domain-specific intent classification.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 107, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Intent Classification on Three Important Domains", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "This work is motivated by introducing multi-task learning (MTL), transfer learning (TL) and acousticmodel information into the framework of integrating -best hypotheses for spoken language understanding. Among those algorithms, we find the MTL results in higher performance compared to the TL. For the acoustic-model information, we illustrate their close relationship with the hypothesis quality and utilize the hierarchical attention mechanism to include the information for byte pair embedding and hypothesis integration layer within the shared layers, which can further enhance the MTL. The relative error reduction is 19.3% for domain classification and 36.9% for intent classification. We also use some utterances to analyze the real cause of the improvements. By decoding the hidden representations and comparing with transcription, we find by the MTL, the model attempts to find a balance point and do some reasonable query rewriting. In the future, we will explore more by introducing more tasks, improving the efficiency and utilizing more abundant information like word lattice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "4" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The fifth'chime'speech separation and recognition challenge: dataset, task and baselines", |
|
"authors": [ |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Barker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinji", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanuel", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Trmal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1803.10609" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jon Barker, Shinji Watanabe, Emmanuel Vincent, and Jan Trmal. 2018. The fifth'chime'speech separation and recognition challenge: dataset, task and baselines. arXiv preprint arXiv:1803.10609.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Multitask learning. Machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "41--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rich Caruana. 1997. Multitask learning. Machine learning, 28(1):41-75.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Improving broadcast news transcription by lightly supervised discriminative training", |
|
"authors": [ |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Woodland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ho Yin Chan and Phil Woodland. 2004. Improving broadcast news transcription by lightly supervised discrimina- tive training. In 2004 IEEE International Conference on Acoustics, Speech, and Signal Processing, volume 1, pages I-737. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Discriminative syntactic language modeling for speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Roark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Murat", |
|
"middle": [], |
|
"last": "Saraclar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "507--514", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Collins, Brian Roark, and Murat Saraclar. 2005. Discriminative syntactic language modeling for speech recognition. In Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics, pages 507-514. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A post-processing system to yield reduced word error rates: Recognizer output voting error reduction (rover)", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Jonathan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fiscus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "IEEE Workshop on Automatic Speech Recognition and Understanding Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "347--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan G Fiscus. 1997. A post-processing system to yield reduced word error rates: Recognizer output voting error reduction (rover). In 1997 IEEE Workshop on Automatic Speech Recognition and Understanding Proceedings, pages 347-354. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Universal language model fine-tuning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1801.06146" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. arXiv preprint arXiv:1801.06146.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Entity hierarchy embedding", |
|
"authors": [ |
|
{ |
|
"first": "Zhiting", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Poyao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuntian", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingkai", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1292--1300", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiting Hu, Poyao Huang, Yuntian Deng, Yingkai Gao, and Eric Xing. 2015. Entity hierarchy embedding. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1292-1300.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Normalization of asr confidence classifier scores via confidence mapping", |
|
"authors": [ |
|
{ |
|
"first": "Kshitiz", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaojun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Fifteenth Annual Conference of the International Speech Communication Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kshitiz Kumar, Chaojun Liu, and Yifan Gong. 2014. Normalization of asr confidence classifier scores via confidence mapping. In Fifteenth Annual Conference of the International Speech Communication Association.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Confidence-features and confidence-scores for asr applications in arbitration and dnn speaker adaptation", |
|
"authors": [ |
|
{ |
|
"first": "Kshitiz", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziad", |
|
"middle": [ |
|
"Al" |
|
], |
|
"last": "Bawab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaojun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Dumoulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kshitiz Kumar, Ziad Al Bawab, Yong Zhao, Chaojun Liu, Beno\u00eet Dumoulin, and Yifan Gong. 2015. Confidence-features and confidence-scores for asr applications in arbitration and dnn speaker adaptation. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Deep learning ip network representations", |
|
"authors": [ |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cristian", |
|
"middle": [], |
|
"last": "Lumezanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Workshop on Big Data Analytics and Machine Learning for Data Communication Networks", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--39", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mingda Li, Cristian Lumezanu, Bo Zong, and Haifeng Chen. 2018. Deep learning ip network representations. In Proceedings of the 2018 Workshop on Big Data Analytics and Machine Learning for Data Communication Networks, pages 33-39.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Improving spoken language understanding by exploiting asr n-best hypotheses", |
|
"authors": [ |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weitong", |
|
"middle": [], |
|
"last": "Ruan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinyue", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Soldaini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wael", |
|
"middle": [], |
|
"last": "Hamza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengwei", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.05284" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mingda Li, Weitong Ruan, Xinyue Liu, Luca Soldaini, Wael Hamza, and Chengwei Su. 2020. Improving spoken language understanding by exploiting asr n-best hypotheses. arXiv preprint arXiv:2001.05284.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Efficient Latent Semantic Extraction from Cross Domain Data with Declarative Language", |
|
"authors": [ |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mingda Li. 2020. Efficient Latent Semantic Extraction from Cross Domain Data with Declarative Language. Ph.D. thesis, UCLA.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Multi-task deep neural networks for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weizhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.11504" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodong Liu, Pengcheng He, Weizhu Chen, and Jianfeng Gao. 2019. Multi-task deep neural networks for natural language understanding. arXiv preprint arXiv:1901.11504.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Classification methods for remotely sensed data", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mather", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandt", |
|
"middle": [], |
|
"last": "Tso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Mather and Brandt Tso. 2016. Classification methods for remotely sensed data. CRC press.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A reranking approach for recognition and classification of speech input in conversational dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Morbini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartik", |
|
"middle": [], |
|
"last": "Audhkhasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Artstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "Van Segbroeck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Sagae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panayiotis", |
|
"middle": [], |
|
"last": "Georgiou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Traum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shri", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "2012 IEEE Spoken Language Technology Workshop (SLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabrizio Morbini, Kartik Audhkhasi, Ron Artstein, Maarten Van Segbroeck, Kenji Sagae, Panayiotis Georgiou, David R Traum, and Shri Narayanan. 2012. A reranking approach for recognition and classification of speech input in conversational dialogue systems. In 2012 IEEE Spoken Language Technology Workshop (SLT), pages 49-54. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Rescoring n-best speech recognition list based on one-on-one hypothesis comparison using encoder-classifier model", |
|
"authors": [ |
|
{ |
|
"first": "Atsunori", |
|
"middle": [], |
|
"last": "Ogawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Delcroix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shigeki", |
|
"middle": [], |
|
"last": "Karita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomohiro", |
|
"middle": [], |
|
"last": "Nakatani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6099--6103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsunori Ogawa, Marc Delcroix, Shigeki Karita, and Tomohiro Nakatani. 2018. Rescoring n-best speech recognition list based on one-on-one hypothesis comparison using encoder-classifier model. In 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6099-6103. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders", |
|
"authors": [ |
|
{ |
|
"first": "Atsunori", |
|
"middle": [], |
|
"last": "Ogawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Delcroix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shigeki", |
|
"middle": [], |
|
"last": "Karita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomohiro", |
|
"middle": [], |
|
"last": "Nakatani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3900--3904", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsunori Ogawa, Marc Delcroix, Shigeki Karita, and Tomohiro Nakatani. 2019. Improved Deep Duel Model for Rescoring N-Best Speech Recognition List Using Backward LSTMLM and Ensemble Encoders. In Proc. Interspeech 2019, pages 3900-3904.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A survey on transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "Sinno Jialin", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "IEEE Transactions on knowledge and data engineering", |
|
"volume": "22", |
|
"issue": "10", |
|
"pages": "1345--1359", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sinno Jialin Pan and Qiang Yang. 2009. A survey on transfer learning. IEEE Transactions on knowledge and data engineering, 22(10):1345-1359.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Search results based n-best hypothesis rescoring with maximum entropy classification", |
|
"authors": [ |
|
{ |
|
"first": "Fuchun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Roy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Shahshahani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fran\u00e7oise", |
|
"middle": [], |
|
"last": "Beaufays", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "IEEE Workshop on Automatic Speech Recognition and Understanding", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "422--427", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fuchun Peng, Scott Roy, Ben Shahshahani, and Fran\u00e7oise Beaufays. 2013. Search results based n-best hypothesis rescoring with maximum entropy classification. In 2013 IEEE Workshop on Automatic Speech Recognition and Understanding, pages 422-427. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. In Proc. of NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Talking to your tv: Context-aware voice search with hierarchical recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Jinfeng", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ferhan", |
|
"middle": [], |
|
"last": "Ture", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Jojic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 ACM on Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "557--566", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinfeng Rao, Ferhan Ture, Hua He, Oliver Jojic, and Jimmy Lin. 2017. Talking to your tv: Context-aware voice search with hierarchical recurrent neural networks. In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management, pages 557-566.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Discriminative reranking of ASR hypotheses with morpholexical and n-best-list features", |
|
"authors": [ |
|
{ |
|
"first": "Hasim", |
|
"middle": [], |
|
"last": "Sak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Murat", |
|
"middle": [], |
|
"last": "Saraclar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tunga", |
|
"middle": [], |
|
"last": "Gungor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "2011 IEEE Workshop on Automatic Speech Recognition & Understanding, ASRU 2011", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--207", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hasim Sak, Murat Saraclar, and Tunga Gungor. 2011. Discriminative reranking of ASR hypotheses with morpholexical and n-best-list features. In 2011 IEEE Workshop on Automatic Speech Recognition & Understanding, ASRU 2011, Waikoloa, HI, USA, December 11-15, 2011, pages 202-207.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.07909" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Torrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jude", |
|
"middle": [], |
|
"last": "Shavlik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Handbook of research on machine learning applications and trends: algorithms, methods, and techniques", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "242--264", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lisa Torrey and Jude Shavlik. 2010. Transfer learning. In Handbook of research on machine learning applications and trends: algorithms, methods, and techniques, pages 242-264. IGI Global.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Spoken language understanding: Systems for extracting semantic information from speech", |
|
"authors": [ |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "Tur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renato", |
|
"middle": [ |
|
"De" |
|
], |
|
"last": "Mori", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gokhan Tur and Renato De Mori. 2011. Spoken language understanding: Systems for extracting semantic information from speech. John Wiley & Sons.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "The microsoft 2017 conversational speech recognition system", |
|
"authors": [ |
|
{ |
|
"first": "Wayne", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingfeng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fil", |
|
"middle": [], |
|
"last": "Alleva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasha", |
|
"middle": [], |
|
"last": "Droppo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuedong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE international conference on acoustics, speech and signal processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5934--5938", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wayne Xiong, Lingfeng Wu, Fil Alleva, Jasha Droppo, Xuedong Huang, and Andreas Stolcke. 2018. The microsoft 2017 conversational speech recognition system. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP), pages 5934-5938. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Hierarchical attention networks for document classification", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 conference of the North American chapter of the association for computational linguistics: human language technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1480--1489", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchical attention networks for document classification. In Proceedings of the 2016 conference of the North American chapter of the association for computational linguistics: human language technologies, pages 1480-1489.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "A survey on multi-task learning", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Zhang and Qiang Yang. 2017. A survey on multi-task learning.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Figure 3: Confidence score of ASR 1-Best (bin size 10\u2030).", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Figure 4: Confidence score difference (bin size 5\u2030).", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Attention on hypotheses integration.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Figure 8: Improvements on 8 important domains.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Relative error reduction (RErr) for domain classification." |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Comparison among the decoded hidden representations, hypotheses and transcriptions." |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Intent classification: relative error reduction versus Baseline." |
|
} |
|
} |
|
} |
|
} |