|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:33:04.048969Z" |
|
}, |
|
"title": "ASR Adaptation for E-commerce Chatbots using Cross-Utterance Context and Multi-Task Language Modeling", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Shenoy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sravan", |
|
"middle": [], |
|
"last": "Bodapati", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Automatic Speech Recognition (ASR) robustness toward slot entities are critical in ecommerce voice assistants that involve monetary transactions and purchases. Along with effective domain adaptation, it is intuitive that cross utterance contextual cues play an important role in disambiguating domain specific content words from speech. In this paper, we investigate various techniques to improve contextualization, content word robustness and domain adaptation of a Transformer-XL neural language model (NLM) to rescore ASR N-best hypotheses. To improve contextualization, we utilize turn level dialogue acts along with cross utterance context carry over. Additionally, to adapt our domaingeneral NLM towards e-commerce on-the-fly, we use embeddings derived from a finetuned masked LM on in-domain data. Finally, to improve robustness towards in-domain content words, we propose a multi-task model that can jointly perform content word detection and language modeling tasks. Compared to a noncontextual LSTM LM baseline, our best performing NLM rescorer results in a content WER reduction of 19.2% on e-commerce audio test set and a slot labeling F1 improvement of 6.4%.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Automatic Speech Recognition (ASR) robustness toward slot entities are critical in ecommerce voice assistants that involve monetary transactions and purchases. Along with effective domain adaptation, it is intuitive that cross utterance contextual cues play an important role in disambiguating domain specific content words from speech. In this paper, we investigate various techniques to improve contextualization, content word robustness and domain adaptation of a Transformer-XL neural language model (NLM) to rescore ASR N-best hypotheses. To improve contextualization, we utilize turn level dialogue acts along with cross utterance context carry over. Additionally, to adapt our domaingeneral NLM towards e-commerce on-the-fly, we use embeddings derived from a finetuned masked LM on in-domain data. Finally, to improve robustness towards in-domain content words, we propose a multi-task model that can jointly perform content word detection and language modeling tasks. Compared to a noncontextual LSTM LM baseline, our best performing NLM rescorer results in a content WER reduction of 19.2% on e-commerce audio test set and a slot labeling F1 improvement of 6.4%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Task-oriented conversations in voice chatbots deployed for e-commerce usecases such as shopping (Maarek, 2018) , browsing catalog, scheduling deliveries or ordering food are predominantly shortform audios. Moreover, these dialogues are restricted to a narrow range of multi-turn interactions that involve accomplishing a specific task (Mari et al., 2020) . The back and forth between a user and the chatbots are key to reliably capture the user intent and slot entities referenced in the spoken utterances. As shown in previous works (Irie et al., 2019; Parthasarathy et al., 2019; Sun et al., 2021) , rather than decoding each utterance independently, there can be benefit in decoding these utterances based on context from previous turns. In the case of grocery shopping for example, knowing that the context is \"what kind of laundry detergent?\" should help in disambiguating \"pods\" from \"pause\". Another common aspect in e-commerce chatbots is that the speech patterns differ among sub-categories of usecases (Eg. shopping clothes vs ordering fast food). Hence, some chatbot systems allow users to provide pre-defined grammars or sample utterances that are specific for their usecase . These user provided grammars are then predominantly used to perform domain adaptation on an n-gram language model. Recently (Shenoy et al., 2021) showed that these can be leveraged to bias a Transformer-XL (TXL) LM rescorer on-the-fly.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 110, |
|
"text": "(Maarek, 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 354, |
|
"text": "(Mari et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 553, |
|
"text": "(Irie et al., 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 581, |
|
"text": "Parthasarathy et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 582, |
|
"end": 599, |
|
"text": "Sun et al., 2021)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1313, |
|
"end": 1334, |
|
"text": "(Shenoy et al., 2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While there has been extensive previous work on improving contextualization of TXL LM using historical context, none of the approaches utilize signals from a natural language understanding (NLU) component such as turn level dialogue acts. This paper investigates how to utilize dialogue acts along with user provided speech patterns to adapt a domain-general TXL LM towards different ecommerce usecases on-the-fly. We also propose a novel multi-task architecture for TXL, where the model jointly learns to perform domain specific slot detection and LM tasks. We use perplexity (PPL) and word error rate (WER) as our evaluation metrics. We also evaluate on downstream NLU metrics such as intent classification (IC) F1 and slot labeling (SL) F1 to capture the success of these conversations. The overall contributions of this work can be summarized as follows :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We show that a TXL model that utilizes turn level dialogue act information along with long span context helps with contextualiziation and improves WER and IC F1 in e-commerce chatbots.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 To improve robustness towards e-commerce domain specifc slot entities, we propose a novel TXL architecture that is jointly trained on slot detection and LM tasks which significantly improves content WERR and SL F1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We show that adapting the NLM towards user provided speech patterns by using BERT on domain specific text is an efficient and effective method to perform on-the-fly adaptation of a domain-general NLM towards ecommerce utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Incorporating cross utterance context has been well explored with both recurrent and non-recurrent NLMs. With LSTM NLMs, long span context is usually propogated without resetting hidden states across sentences or using longer sequence lengths (Xiong et al., 2018a; Irie et al., 2019; Khandelwal et al., 2018; Parthasarathy et al., 2019) . In (Xiong et al., 2018b) , along with longer history, information about turn taking and speaker overlap is used to improve contextualization in human to human conversations. With transformer architecture based on self attention (Vaswani et al., 2017) (Dai et al., 2019) showed that by utilizing segment wise recurrence Transformer-XL (TXL) (Dai et al., 2019 ) is able to effectively leverage long span context while decoding. More recently, improving contextualization of the TXL models included adding a LSTM fusion layer to complement the advantages of recurrent with non-recurrent models (Sun et al., 2021) . (Shenoy et al., 2021 ) incorporated a non-finetuned masked LM fusion in order to make the domain adaptation of TXL models quick and on-the-fly using embeddings derived from customer provided data and incorporated dialogue acts but only with an LSTM based LM. While (Sunkara et al., 2020) tried to fuse multi-model features into a seq-to-seq LSTM based network. In (Sharma, 2020) cross utterance context was effectively used to perform better intent classification with e-commerce voice assistants. For domain adaptation, previous techniques explored include using an explicit topic vector as classified by a separate domain classifier and incorporating a neural cache (Mikolov and Zweig, 2019; Li et al., 2018; Raju et al., 2018; Chen et al., 2015) . (Irie et al., 2018) used a mixture of domain experts which are dynamically interpolated. It is also shown in , that using a hybrid pointer network over contextual metadata can also help in transcribing long form social media audio. 
Joint learning NLU tasks such as intent detection and slot filling have been explored with RNN based LMs in (Liu and Lane, 2016) and more recently in (Rao et al., 2020) , where they show that a jointly trained model consisting of both ASR and NLU tasks interfaced with a neural network based interface helps incorporate semantic information from NLU and improves ASR that comprises a LSTM based NLM. In tried to incorporate joint slot and intent detection into a LSTM based rescorer with a goal of improving accuracy on rare words in an end-to-end ASR system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 264, |
|
"text": "(Xiong et al., 2018a;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 283, |
|
"text": "Irie et al., 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 308, |
|
"text": "Khandelwal et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 336, |
|
"text": "Parthasarathy et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 363, |
|
"text": "(Xiong et al., 2018b)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 589, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 590, |
|
"end": 608, |
|
"text": "(Dai et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 679, |
|
"end": 696, |
|
"text": "(Dai et al., 2019", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 930, |
|
"end": 948, |
|
"text": "(Sun et al., 2021)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 951, |
|
"end": 971, |
|
"text": "(Shenoy et al., 2021", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1216, |
|
"end": 1238, |
|
"text": "(Sunkara et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1315, |
|
"end": 1329, |
|
"text": "(Sharma, 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1619, |
|
"end": 1644, |
|
"text": "(Mikolov and Zweig, 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1645, |
|
"end": 1661, |
|
"text": "Li et al., 2018;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1662, |
|
"end": 1680, |
|
"text": "Raju et al., 2018;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1681, |
|
"end": 1699, |
|
"text": "Chen et al., 2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1702, |
|
"end": 1721, |
|
"text": "(Irie et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 2042, |
|
"end": 2062, |
|
"text": "(Liu and Lane, 2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 2084, |
|
"end": 2102, |
|
"text": "(Rao et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "However, none of the previous work utilize dialogue acts with a non-recurrent based LM such as Transformer-XL nor optimize towards improving robustness of in-domain slot entities. In this paper we experiment and study the impact of utilizing dialogue acts along with a masked language model fusion to improve contextualization and domain adaptation. Additionally, we also propose a novel multi-task architecture with TXL LM that improves the robustness towards in-domain slot entity detection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A standard language model in an ASR system computes a probability distribution over a sequence of words W = w 0 , ..., w N auto-regressively as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(W ) = N i=1 p(w i |w 1 , w 2 , ..., w i\u22121 )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In our experiments, along with historical context, we condition the LM on additional contextual metadata such as dialogue acts :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "p(W ) = N i=1 p(w i |w 1 , w 2 , ..., w i\u22121 , c 1 , c 2 , ..., c k ) (2) Where c 1 , c 2 , .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "..c k are the turn based lexical representation of the contextual metadata. For baseline, we use a standard LSTM LM as summarized below : where embed i is a fixed size lower dimensional word embedding and the LSTM outputs are projected to word level outputs using W T ho . A Sof tmax layer converts the word level outputs into final word level probabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "embed i = E T ke w i\u22121 c i , h i = LST M (h i\u22121 , c i\u22121 , embed i ) p(w i |w <i ) = Sof tmax(W T ho h i )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "{w 0 , w 1 , ..w T } {s 0 , s 1 , ...s T } {w 1 , w 2 , ...w T+1 } h k-1 x N", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Although recurrent language models help in modeling long range dependencies to certain extent, they still suffer from the fuzzy far away problem (Khandelwal et al., 2018) . Vanilla transformer LMs on the other hand use fixed segment lengths which leads to context fragmentation. To address these limitations and model long range dependencies, TXL models add segment-level recurrence and use a relative positional encoding scheme (Dai et al., 2019) . Hence we choose to use a TXL LM directly. The cached hidden representations from previous segments helps contextual information flow across segment boundaries. If s k = [w k,1 , ..., w k,T ] and s k+1 = [x k+1,1 , ..., x k+1,T ] are two consecutive segments of length T and h n k is the n-th layer hid-den state produced for the k-th segment s k , then, the n-th layer hidden state for segment s k+1 is produced as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 170, |
|
"text": "(Khandelwal et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 447, |
|
"text": "(Dai et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-XL based NLM", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h n\u22121 k+1 = [SG(h n\u22121 k ) \u2022 h n\u22121 k+1 ] q n k+1 , k n k+1 , v n k+1 =h n\u22121 k+1 W q h n k+1 = T L(q n k+1 , k n k+1 , v n k+1 )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Transformer-XL based NLM", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where SG(.) stands for stop gradient and T L stands for Transformer Layer. To carry over context from previous turns, we train and evaluate the model by concatenating all the turns, including the bot responses, in a single conversation session. The model is trained with a cross entropy objective as defined below :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-XL based NLM", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "L LM = \u2212 1 T T i=1 log(P (w i | w <i , s <i )) (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-XL based NLM", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "During inference time, we cache a fixed length hidden representation from previous segments. We also use the generated bot responses to perform a forward pass and carry over the context to the next user turn.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-XL based NLM", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To make the our domain-general model robust to e-commerce specific slot entities, we propose a multi-task learning approach to training the TXL LM. We train our models on both LM and slot detection tasks. Similar to slot filling, slot detection is a sequence classification task that involves predicting if a word, w i at time step i is a domain specific slot entity. We use a separate slot detection network, consisting of a simple multi-layer perceptron, and use the final layer hidden representation from the TXL network as inputs to the network. Figure 2 shows an example utterance with the slot annotations. Formally, let s = (s 0 , s 1 , ..., s T ) be the slot label sequence, corresponding to a word sequence w = w 0 , w 1 , ...., w T in the k-th segment. We model the slot label output s t as a conditional distribution over input word sequence up to time step t, w \u2264t similar to (Liu and Lane, 2016) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 888, |
|
"end": 908, |
|
"text": "(Liu and Lane, 2016)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 550, |
|
"end": 558, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h n k = T L(q n k , k n k , v n k ) p(s t |w \u2264t ) = SlotLabelDist(h n t )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We use a cross-entropy training objective for the slot detection task as below :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L SD = \u2212 1 T T i=1 log(P (s i | w \u2264i ))", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To incorporate this semantic information about the word from previous time step into the NLM, we use the logits from the slot detection network to condition the probability distribution of the next word in the sequence as shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 239, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The total loss is then computed using a linear combination of LM and slot detection losses:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "L total = L LM + \u03b1 SD L SD (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where \u03b1 SD is the weight for the slot detection loss.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Slot detection and language modeling multi-task learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Dialogue acts (DA) in a conversation represent the intention of an utterance and is intended towards capturing the action that an agent is trying to accomplish (Austin, 1975 ). An example conversation snippet with DA is shown in Table 1 . DA classification is typically performed in a separate component that is part of a downstream NLU system and consumes the outputs generated by ASR. The classified DA is an important contextual signal that provides hints about the type of speech pattern that can be expected in the next turn. We utilize these signals to train our TXL models. Specifically, we augment the training data with the dialogue act information prefixed to the user turns and surround them with explicit <dialogue_act> tags. The expectation is that the TXL LM learns the usage patterns associated with different dialogue acts and this information should help narrow down the search space for the model to content words relevant to the current dialogue context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 173, |
|
"text": "(Austin, 1975", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 236, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Transformer-XL LM conditioning on dialogue acts", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In production chatbots, it is common for bot developers to provide example speech patterns, in the form of sample sentences or explicit grammars, which can then be used to bias the n-gram language models in a ASR system . This pre-defined set of speech patterns is a useful source of contextual information that can be also used to bias NLMs as well. As demonstrated in (Shenoy et al., 2021) , pretrained masked language (Radford et al., 2016; Brown et al., 2020) . However, the sentence or document embeddings derived from such an MLM without finetuning on in-domain data is shown to be inferior in terms of the ability to capture semantic information that can be used in similarity related tasks (Reimers and Gurevych, 2019) . Instead of using the [CLS] vector to obtain sentence embeddings, in this paper we take the average of context embeddings from last two layers as these are shown to be consistently better than using [CLS] vector (Reimers and Gurevych, 2019; Li et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 391, |
|
"text": "(Shenoy et al., 2021)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 443, |
|
"text": "(Radford et al., 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 463, |
|
"text": "Brown et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 698, |
|
"end": 726, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 927, |
|
"end": 932, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 940, |
|
"end": 968, |
|
"text": "(Reimers and Gurevych, 2019;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 969, |
|
"end": 985, |
|
"text": "Li et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Domain adaptation using contextual semantic embeddings", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We use a simple fusion method as experimented in (Shenoy et al., 2021) where the hidden state from the last layer of the TXL decoder is concatenated with the BERT derived embedding. This is then followed by a single projection layer with a nonlinear activation function \u03c3, such as sigmoid.", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 70, |
|
"text": "(Shenoy et al., 2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Domain adaptation using contextual semantic embeddings", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "g t = \u03c3(W [h T XL t ; e M LM ] + b)", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Domain adaptation using contextual semantic embeddings", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Where h T XL t is the hidden state from the last transformer decoder and e M LM is the BERT derived embedding from in domain sample utterances. The intuition here is that the model learns to associate the domain specific BERT derived embedding with the occurrences of jargon specific to that domain. Thus providing different BERT vectors derived from different domain texts should allow the model to adapt towards such domains on-the-fly. 4 Experimental Setup", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Domain adaptation using contextual semantic embeddings", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We required task-oriented dialogue datasets with actor, dialogue acts and the slot entities annotated. Since no single dataset was large enough to train a reliable language model, we used a combination of Schema-Guided Dialogue Dataset (Rastogi et al., 2019) , MultiWOZ 2.1 (Eric et al., 2019; Budzianowski et al., 2018) , MultiDoGo (Peskov et al., 2019) along with anonymized in-house datasets that belong to two e-commerce usecases : retail and fastfood delivery. The final LM training data consisted of 260k training samples, 56k validation and evaluation samples and around 9.9 million running words. We used a vocabulary of size 25k. We evaluated our models on anonymized in-house 8kHz close-talk audio. These audio comprised of task-oriented conversations with multiple speakers and acoustic conditions representative of real world usage and belonged to the same two usecases mentioned above. The average number of turns in the audio dataset was 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 258, |
|
"text": "(Rastogi et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 293, |
|
"text": "(Eric et al., 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 320, |
|
"text": "Budzianowski et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 354, |
|
"text": "(Peskov et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We used a hybrid ASR model comprising of a regular-frame-rate (RFR) model trained on crossentropy loss, followed by sMBR (Ghoshal and Povey, 2013) . The first pass LM we used was a domain-general Kneser-Ney (KN) (Kneser and Ney, 1995) smoothed 4-gram model estimated on a weighted mix of datasets spanning multiple domains. The final vocabulary size of the n-gram LM was 500k words. All our NLM rescorers used a 4-layer Tranformer-XL 1 decoder, each of size 512 with 4 attention heads. The input word embedding size was 512. We used a segment and memory length of 25. During model training we applied a dropout rate of 0.3 to both the slot detection network and TXL. For the slot detection layer we used a 3 layer MLP and used the final layer hidden representation from the TXL as the output. To obtain the BERT embedding from in-domain speech patterns, we finetune huggingface 2 pretrained BERT mode on the retail and fastfood text corpus. The derived BERT embedding size used was 768. During inference, we extract n-best hypothesis with n<=50 from the lattice generated by the first pass ASR model. We rescored the n-best hypothesis by multiplying the acoustic score with the acoustic scale and adding it to the scores obtained from the TXL rescorer. We used a fixed \u03b1 SD of 0.8 for the slot detection loss. Table 3 summarizes the relative perplexity reductions (PPLR). Since we are optimizing our models to improve on the e-commerce domain specific con-tent words we directly report the relative content word error rate reductions (CWERR) in Table 2 along with the relative impact on the downstream NLU tasks of IC and SL. For computing CWERR, we remove all the stop words comprising of commonly used function words, such as conjunctions and prepositions from the transcriptions and evaluate only on content words. We also report statistical significance of our CWER improvements using matched pairs sentence segment word error test (MPSSWE). 
All the WER numbers are relative to a non-contextual LSTM baseline. The gap in the performance between the two domains we tested on is reflective of the underlying training corpus distribution, which has more text belonging to the fastfood domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 146, |
|
"text": "(Ghoshal and Povey, 2013)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 234, |
|
"text": "(Kneser and Ney, 1995)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1310, |
|
"end": 1317, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 1545, |
|
"end": 1552, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ASR setup and NLM setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Perplexity gains indicate effective domain adaptation We report both general domain and e-commerce domain PPLR. Overall, the contextualization and domain adaptation techniques help with the PPL dropping in both cases. The jointly trained model on in-domain slot detection however clearly helps more in the e-commerce case. Moreover, since we used BERT that was finetuned on e-commerce text we again see larger gains in the domain specific testset when compared to the general domain testset (23.3% vs 16.3%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Using system dialogue acts improves intent detection: From our experiments that train the TXL LMs with dialogue act information, it is clear that dialogue acts helps with relatively marginal gains in PPL (3.4% on generic and 9.9% on ecommerce) and WER (1.2% Retail, 14.4% Fastfood). When compared to other techniques we explored, we see that the impact on intent classification was higher in proportion to the gain in WER, which indicates that dialogue acts are valuable contextual signals to help with intent conveying phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Slot detection loss yields improvements on domain specific content words: Rows 4 and 5 of Table 2 report the content WERR, IC and SL F1s that we obtain by incorporating the joint LM and slot detection (SD) loss. As expected, the multitask model improves on the content words significantly (1.2% to 4.3% on Retail, 12.3% to 16.3% on Fastfood). This WER improvement also carries over to a higher SL F1 improvement, but a relatively small IC F1 improvement. This is again indicative that the improvements are mainly on recognition of in-domain slot entities and the auxiliary function words that are important to recognize intents do not benefit as much.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 97, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Domain adaptation using BERT fusion provides maximum gains: Rows 6 and 7 in Table 2 illustrate the performance of the TXL LM that incorporates the BERT embedding fusion layer. Compared to the model trained with joint slot detection loss, BERT fusion model performs better on all ASR and the NLU metrics. It is evident from the results that the BERT embeddings that are derived from different user provided text helps the model effectively adapt to the domain that the embedding was derived from. The gains are amplified when complemented with the dialogue acts ability to improve on intent carrying words and the joint slot detection model leading to a WERR improving from 12.3% to 19.2% on the fastfood domain and 1% to 11.8% on the retail domain. This also carries over to an improvement on IC and SL F1 of 3.8%, 4.3% on retail and 2.1%, 6.4% on fastfood.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 83, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper we explored different ways to robustly adapt a domain-general Transformer-XL NLM to rescore N-best hypotheses from a hybrid ASR system for task-oriented e-commerce speech conversations. We demonstrated that Transformer-XL LM trained with turn level dialogue acts benefits intent classification by improving the recognition of content words. Additionally, we show that using semantic embeddings derived from a masked language model finetuned on e-commerce domain can be effectively used to adapt a domain-general TXL LM for e-commerce domain utterance rescoring task. Finally, we introduced a new TXL training loss function to jointly predict content words along with language modeling task, this when combined with BERT fusion and dialogue acts, amplifies the WER, IC F1 and SL F1 gains. We have also shown these improvements to be statistically significant. Future work can look at integrating these methods into an end-to-end ASR system for both rescoring task and first pass LM fusion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://github.com/kimiyoung/transformer-xl 2 https://github.com/huggingface/transformers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "How to do things with words", |
|
"authors": [ |
|
{ |
|
"first": "Austin", |
|
"middle": [], |
|
"last": "John Langshaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1975, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Langshaw Austin. 1975. How to do things with words.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Multiwoz -A large-scale multi-domain wizard-of-oz dataset for task-oriented dialogue modelling", |
|
"authors": [ |
|
{ |
|
"first": "Pawel", |
|
"middle": [], |
|
"last": "Budzianowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Hsien", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo-Hsiang", |
|
"middle": [], |
|
"last": "Tseng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I\u00f1igo", |
|
"middle": [], |
|
"last": "Casanueva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Ultes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Osman Ramadan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gasic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pawel Budzianowski, Tsung-Hsien Wen, Bo-Hsiang Tseng, I\u00f1igo Casanueva, Stefan Ultes, Osman Ra- madan, and Milica Gasic. 2018. Multiwoz - A large-scale multi-domain wizard-of-oz dataset for task-oriented dialogue modelling. CoRR, abs/1810.00278.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Recurrent neural network language model adaptation for multi-genre broadcast speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xunying", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Lanchantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Mark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Gales", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Woodland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3511--3515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Chen, T. Tan, Xunying Liu, Pierre Lanchantin, M. Wan, Mark J. F. Gales, and Philip C. Woodland. 2015. Recurrent neural network language model adaptation for multi-genre broadcast speech recog- nition. In Interspeech 2015, Dresden, Germany, September 6-10, 2015, pages 3511-3515. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Transformer-XL: Attentive language models beyond a fixed-length context", |
|
"authors": [ |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2978--2988", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1285" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Car- bonell, Quoc Le, and Ruslan Salakhutdinov. 2019. Transformer-XL: Attentive language models beyond a fixed-length context. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 2978-2988, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Multiwoz 2.1: Multi-domain dialogue state corrections and state tracking baselines", |
|
"authors": [ |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shachi", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Sethi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanchit", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuyang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-T\u00fcr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihail Eric, Rahul Goel, Shachi Paul, Abhishek Sethi, Sanchit Agarwal, Shuyang Gao, and Dilek Hakkani- T\u00fcr. 2019. Multiwoz 2.1: Multi-domain dialogue state corrections and state tracking baselines. CoRR, abs/1907.01669.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Scalable language model adaptation for spoken dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Gandhe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariya", |
|
"middle": [], |
|
"last": "Rastrow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Hoffmeister", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "In SLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "907--912", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankur Gandhe, Ariya Rastrow, and Bj\u00f6rn Hoffmeis- ter. 2018. Scalable language model adaptation for spoken dialogue systems. In SLT Workshop 2018, Athens, Greece, pages 907-912. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Sequence-discriminative training of deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Arnab", |
|
"middle": [], |
|
"last": "Ghoshal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arnab Ghoshal and Daniel Povey. 2013. Sequencedis- criminative training of deep neural networks. In in Proc. INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Radmm: Recurrent adaptive mixture model with applications to domain robust language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Kazuki", |
|
"middle": [], |
|
"last": "Irie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shankar", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Nirschl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hank", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6079--6083", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kazuki Irie, Shankar Kumar, Michael Nirschl, and Hank Liao. 2018. Radmm: Recurrent adaptive mix- ture model with applications to domain robust lan- guage modeling. In IEEE International Conference on Acoustics, Speech, and Signal Processing, pages 6079-6083, Calgary, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Training language models for longspan cross-sentence evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Kazuki", |
|
"middle": [], |
|
"last": "Irie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Zeyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Schl\u00fcter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ASRU Singapore", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "419--426", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kazuki Irie, Albert Zeyer, Ralf Schl\u00fcter, and Hermann Ney. 2019. Training language models for long- span cross-sentence evaluation. In ASRU Singapore, pages 419-426. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Sharp nearby, fuzzy far away: How neural language models use context", |
|
"authors": [ |
|
{ |
|
"first": "Urvashi", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "284--294", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1027" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Urvashi Khandelwal, He He, Peng Qi, and Dan Juraf- sky. 2018. Sharp nearby, fuzzy far away: How neu- ral language models use context. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 284-294, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Improved backing-off for m-gram language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Reinhard", |
|
"middle": [], |
|
"last": "Kneser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "181--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reinhard Kneser and Hermann Ney. 1995. Improved backing-off for m-gram language modeling. In ICASSP, pages 181-184. IEEE Computer Society.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "On the sentence embeddings from pre-trained language models", |
|
"authors": [ |
|
{ |
|
"first": "Bohan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junxian", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingxuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9119--9130", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.733" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bohan Li, Hao Zhou, Junxian He, Mingxuan Wang, Yiming Yang, and Lei Li. 2020. On the sentence embeddings from pre-trained language models. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9119-9130, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Recurrent neural network language model adaptation for conversational speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hainan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Interspeech, Hyderabad, India", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3373--3377", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ke Li, Hainan Xu, Yiming Wang, Daniel Povey, and Sanjeev Khudanpur. 2018. Recurrent neural net- work language model adaptation for conversational speech recognition. In Interspeech, Hyderabad, In- dia, 2-6 September 2018, pages 3373-3377.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Joint online spoken language understanding and language modeling with recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Lane", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 17th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W16-3603" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Liu and Ian Lane. 2016. Joint online spoken lan- guage understanding and language modeling with re- current neural networks. In Proceedings of the 17th", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "22--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Special Interest Group on Dis- course and Dialogue, pages 22-30, Los Angeles. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Contextualizing ASR lattice rescoring with hybrid pointer network language model", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Da-Rong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunxi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yatharth", |
|
"middle": [], |
|
"last": "Synnaeve", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Saraf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Interspeech 2020, 21st Annual Conference of the International Speech Communication Association, Virtual Event", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3650--3654", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2020-1344" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Da-Rong Liu, Chunxi Liu, Frank Zhang, Gabriel Syn- naeve, Yatharth Saraf, and Geoffrey Zweig. 2020. Contextualizing ASR lattice rescoring with hybrid pointer network language model. In Interspeech 2020, 21st Annual Conference of the International Speech Communication Association, Virtual Event, Shanghai, China, 25-29 October 2020, pages 3650- 3654. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Alexa and Her Shopping Journey", |
|
"authors": [ |
|
{ |
|
"first": "Yoelle", |
|
"middle": [], |
|
"last": "Maarek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th ACM International Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3269206.3272923" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoelle Maarek. 2018. Alexa and Her Shopping Jour- ney. In Proceedings of the 27th ACM International Conference on Information and Knowledge Manage- ment, page 1. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The evolution of marketing in the context of voice commerce: A managerial perspective", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Mari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreina", |
|
"middle": [], |
|
"last": "Mandelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ren\u00e9", |
|
"middle": [], |
|
"last": "Algesheimer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "HCI in Business, Government and Organizations -7th International Conference, HCIBGO 2020, Held as Part of the 22nd HCI International Conference", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "405--425", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-50341-3_32" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Mari, Andreina Mandelli, and Ren\u00e9 Algesheimer. 2020. The evolution of marketing in the context of voice commerce: A managerial perspective. In HCI in Business, Government and Organizations - 7th International Conference, HCIBGO 2020, Held as Part of the 22nd HCI International Conference, HCII 2020, Copenhagen, Denmark, July 19-24, 2020, Proceedings, volume 12204 of Lecture Notes in Computer Science, pages 405-425. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Context dependent recurrent neural network language model", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "SLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "234--239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov and Geoffrey Zweig. 2019. Context dependent recurrent neural network language model. In SLT, pages 234-239. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Longspan language modeling for speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Sarangarajan", |
|
"middle": [], |
|
"last": "Parthasarathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Gale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xie", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Polovets", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuangyu", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarangarajan Parthasarathy, William Gale, Xie Chen, George Polovets, and Shuangyu Chang. 2019. Long- span language modeling for speech recognition. CoRR, abs/1911.04571.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Multi-domain goal-oriented dialogues (Mul-tiDoGO): Strategies toward curating and annotating large scale dialogue data", |
|
"authors": [ |
|
{ |
|
"first": "Denis", |
|
"middle": [], |
|
"last": "Peskov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nancy", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Krone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brigi", |
|
"middle": [], |
|
"last": "Fodor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adel", |
|
"middle": [], |
|
"last": "Youssef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "In Proc EMNLP-IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4526--4536", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Denis Peskov, Nancy Clarke, Jason Krone, Brigi Fodor, Yi Zhang, Adel Youssef, and Mona Diab. 2019. Multi-domain goal-oriented dialogues (Mul- tiDoGO): Strategies toward curating and annotating large scale dialogue data. In Proc EMNLP-IJCNLP, pages 4526-4536.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Unsupervised representation learning with deep convolutional generative adversarial networks", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Metz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumith", |
|
"middle": [], |
|
"last": "Chintala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Luke Metz, and Soumith Chintala. 2016. Unsupervised representation learning with deep con- volutional generative adversarial networks.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Contextual language model adaptation for conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Raju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linda", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Gandhe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Khatri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angeliki", |
|
"middle": [], |
|
"last": "Metallinou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3333--3337", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anirudh Raju, Behnam Hedayatnia, Linda Liu, Ankur Gandhe, Chandra Khatri, Angeliki Metallinou, Anu Venkatesh, and Ariya Rastrow. 2018. Contextual language model adaptation for conversational agents. In Interspeech, Hyderabad, India, pages 3333-3337. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Speech to semantics: Improve ASR and NLU jointly via all-neural interfaces", |
|
"authors": [ |
|
{ |
|
"first": "Milind", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Raju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Dheram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bach", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariya", |
|
"middle": [], |
|
"last": "Rastrow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Interspeech 2020, 21st Annual Conference of the International Speech Communication Association, Virtual Event", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "876--880", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2020-2976" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milind Rao, Anirudh Raju, Pranav Dheram, Bach Bui, and Ariya Rastrow. 2020. Speech to semantics: Improve ASR and NLU jointly via all-neural inter- faces. In Interspeech 2020, 21st Annual Conference of the International Speech Communication Associa- tion, Virtual Event, Shanghai, China, 25-29 October 2020, pages 876-880. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Towards Scalable Multi-domain Conversational Agents: The Schema-Guided Dialogue Dataset. arXiv e-prints", |
|
"authors": [ |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Rastogi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoxue", |
|
"middle": [], |
|
"last": "Zang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinivas", |
|
"middle": [], |
|
"last": "Sunkara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raghav", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Khaitan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.05855" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhinav Rastogi, Xiaoxue Zang, Srinivas Sunkara, Raghav Gupta, and Pranav Khaitan. 2019. Towards Scalable Multi-domain Conversational Agents: The Schema-Guided Dialogue Dataset. arXiv e-prints, page arXiv:1909.05855.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Sentence-BERT: Sentence embeddings using Siamese BERTnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3982--3992", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1410" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- BERT: Sentence embeddings using Siamese BERT- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 3982-3992, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Improving intent classification in an E-commerce voice assistant by using interutterance context", |
|
"authors": [ |
|
{ |
|
"first": "Arpit", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 3rd Workshop on e-Commerce and NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--45", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.ecnlp-1.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arpit Sharma. 2020. Improving intent classification in an E-commerce voice assistant by using inter- utterance context. In Proceedings of The 3rd Work- shop on e-Commerce and NLP, pages 40-45, Seattle, WA, USA. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Adapting long context nlm for asr rescoring in conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Shenoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sravan", |
|
"middle": [], |
|
"last": "Bodapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monica", |
|
"middle": [], |
|
"last": "Sunkara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srikanth", |
|
"middle": [], |
|
"last": "Ronanki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Shenoy, Sravan Bodapati, Monica Sunkara, Srikanth Ronanki, and Katrin Kirchhoff. 2021. Adapting long context nlm for asr rescoring in con- versational agents.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Transformer language models with lstm-based crossutterance information representation", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Woodland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Sun, C. Zhang, and P. C. Woodland. 2021. Trans- former language models with lstm-based cross- utterance information representation.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Multimodal semi-supervised learning framework for punctuation prediction in conversational speech", |
|
"authors": [ |
|
{ |
|
"first": "Monica", |
|
"middle": [], |
|
"last": "Sunkara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srikanth", |
|
"middle": [], |
|
"last": "Ronanki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhanush", |
|
"middle": [], |
|
"last": "Bekal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sravan", |
|
"middle": [], |
|
"last": "Bodapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Interspeech 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4911--4915", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Monica Sunkara, Srikanth Ronanki, Dhanush Bekal, Sravan Bodapati, and Katrin Kirchhoff. 2020. Mul- timodal semi-supervised learning framework for punctuation prediction in conversational speech. In Interspeech 2020, Shanghai, China, 25-29 October 2020, pages 4911-4915. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{

"first": "\u0141ukasz",

"middle": [],

"last": "Kaiser",

"suffix": ""

},

{

"first": "Illia",

"middle": [],

"last": "Polosukhin",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, volume 30, pages 5998-6008.",
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "The microsoft 2017 conversational speech recognition system", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fil", |
|
"middle": [], |
|
"last": "Alleva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasha", |
|
"middle": [], |
|
"last": "Droppo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuedong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5934--5938", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.2018.8461870" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "W. Xiong, L. Wu, Fil Alleva, Jasha Droppo, Xuedong Huang, and Andreas Stolcke. 2018a. The microsoft 2017 conversational speech recognition system. In ICASSP, Calgary, Canada, pages 5934-5938. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Session-level language modeling for conversational speech", |
|
"authors": [ |
|
{ |
|
"first": "Wayne", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingfeng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2764--2768", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1296" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wayne Xiong, Lingfeng Wu, Jun Zhang, and Andreas Stolcke. 2018b. Session-level language modeling for conversational speech. In Proceedings of the 2018 Conference on Empirical Methods in Natu- ral Language Processing, pages 2764-2768, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Multi-task language modeling for improving speech recognition of rare words", |
|
"authors": [ |
|
{ |
|
"first": "Chao-Han Huck", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linda", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Gandhe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yile", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirudh", |
|
"middle": [], |
|
"last": "Raju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denis", |
|
"middle": [], |
|
"last": "Filimonov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Bulyko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chao-Han Huck Yang, Linda Liu, Ankur Gandhe, Yile Gu, Anirudh Raju, Denis Filimonov, and Ivan Bu- lyko. 2020. Multi-task language modeling for im- proving speech recognition of rare words. CoRR, abs/2011.11715.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Transformer-XL language model architecture jointly trained with slot detection task with an optional MLM Example utterance with slots annotated", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: A sample user bot conversation snippet show-</td></tr><tr><td>ing example dialogue acts.</td></tr><tr><td>models (MLM) such as BERT, can be used to de-</td></tr><tr><td>rive a fixed size semantic representation from this</td></tr><tr><td>lexical information. Large pretrained MLMs are</td></tr><tr><td>gaining widespread popularity and are considered</td></tr><tr><td>as powerful language learners</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"3\">: ASR and NLU improvements on two e-commerce sub-domains : Retail and Fastfood. CWERR -Content</td></tr><tr><td colspan=\"3\">Word Error Reduction, IC F1 -Relative Intent Classification F1 Improvement, SL F1 -Relative Slot Labeling F1</td></tr><tr><td colspan=\"3\">Improvement, MPSSWE p-value test on WERR where significant improvements are in bold</td></tr><tr><td>Model</td><td colspan=\"2\">PPLRgen PPLRecom</td></tr><tr><td>TXL</td><td>-</td><td/></tr><tr><td>+ Dialogue Acts (DA)</td><td>3.4%</td><td>9.9%</td></tr><tr><td>+ Joint Slot Detection (SD)</td><td>8.5%</td><td>11.4%</td></tr><tr><td>+ BERT Fusion (BF)</td><td>16.3%</td><td>23.3%</td></tr><tr><td>+ Joint SD + DA</td><td>9.8%</td><td>13.0%</td></tr><tr><td>+ BF + DA</td><td>21.5%</td><td>25.3%</td></tr><tr><td>+ BF + DA + Joint SD</td><td>21.0%</td><td>25.8%</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"text": "Relative perplexity reduction (PPLR) from the various TXL models on a general domain eval set (PPL gen ) and on e-commerce domain eval set (PPL ecom ).", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |