|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:40:51.017424Z" |
|
}, |
|
"title": "Extreme Model Compression for On-device Natural Language Understanding", |
|
"authors": [ |
|
{

"first": "Kanthashree",

"middle": ["Mysore"],

"last": "Sathyendra",

"suffix": "",

"affiliation": {},

"email": ""

},
|
{ |
|
"first": "Samridhi", |
|
"middle": [], |
|
"last": "Choudhary", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Leah", |
|
"middle": [], |
|
"last": "Nicolich-Henkin", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we propose and experiment with techniques for extreme compression of neural natural language understanding (NLU) models, making them suitable for execution on resourceconstrained devices. We propose a task-aware, end-to-end compression approach that performs word-embedding compression jointly with NLU task learning. We show our results on a largescale, commercial NLU system trained on a varied set of intents with huge vocabulary sizes. Our approach outperforms a range of baselines and achieves a compression rate of 97.4% with less than 3.7% degradation in predictive performance. Our analysis indicates that the signal from the downstream task is important for effective compression with minimal degradation in performance.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we propose and experiment with techniques for extreme compression of neural natural language understanding (NLU) models, making them suitable for execution on resourceconstrained devices. We propose a task-aware, end-to-end compression approach that performs word-embedding compression jointly with NLU task learning. We show our results on a largescale, commercial NLU system trained on a varied set of intents with huge vocabulary sizes. Our approach outperforms a range of baselines and achieves a compression rate of 97.4% with less than 3.7% degradation in predictive performance. Our analysis indicates that the signal from the downstream task is important for effective compression with minimal degradation in performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Spoken Language Understanding (SLU) is the task of extracting meaning from a spoken utterance. A typical approach to SLU consists of two modules: an automatic speech recognition (ASR) module that transcribes the audio into a text transcript, followed by a Natural Language Understanding (NLU) module that predicts the semantics (domain, intent and slots) from the ASR transcript. The last few years have seen an increasing application of deep learning approaches to both ASR (Mohamed et al., 2011; Hinton et al., 2012; Graves et al., 2013; Bahdanau et al., 2016) and NLU (Xu and Sarikaya, 2014; Yao et al., 2013; Ravuri and Stolcke, 2015; , making them more reliable, accurate and efficient. This has led to an increasing popularity of feature-rich commercial voice assistants (VAs) -like Amazon Alexa, Google Assistant, Apple's Siri and Microsoft's Cortana. VAs were used in over 3 billion devices in the world in 2019, and are estimated to reach 8 billion devices by 2023 1 . With a growing number of users relying on VAs for their day-to-day activities, voice interfaces have become ubiquitous, and are employed in a range of devices, including smart TVs, mobile phones, smart appliances, home assistants and wearable devices.", |
|
"cite_spans": [ |
|
{ |
|
"start": 475, |
|
"end": 497, |
|
"text": "(Mohamed et al., 2011;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 518, |
|
"text": "Hinton et al., 2012;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 539, |
|
"text": "Graves et al., 2013;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 540, |
|
"end": 562, |
|
"text": "Bahdanau et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 594, |
|
"text": "(Xu and Sarikaya, 2014;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 612, |
|
"text": "Yao et al., 2013;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 638, |
|
"text": "Ravuri and Stolcke, 2015;", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The SLU processing for VAs is often offloaded to the cloud, where high-performance, compute-rich hardware is used to serve complex machine learning models. However, on-device SLU is growing in popularity due to its wide applicability and attractive benefits (Coucke et al., 2018; McGraw et al., 2016; Saade et al., 2018) . First, it enables VAs to work offline, without an active internet connection, allowing their use in remote areas and on devices with poor or intermittent internet connectivity, for eg. in automobiles. Second, on-device processing reduces latency by eliminating communication over the network, and results in an improved user experience. And third, processing utterances on the edge decreases the load on cloud-services, resulting in reduced cloud hardware requirements and associated costs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 279, |
|
"text": "(Coucke et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 300, |
|
"text": "McGraw et al., 2016;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 320, |
|
"text": "Saade et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "NLU is the task of extracting intents and semantics from user queries. NLU in VAs typically consists of the following sub-tasks -domain classification (DC), intent classification (IC) and named entity recognition (NER) . Prior work has shown the effectiveness of recurrent neural models, that jointly model these tasks in a multi-task setup (Kim et al., 2017; Hakkani-Tr et al., 2016; Liu and Lane, 2016a) . These models typically are made up of large word embeddings, sometimes accounting for more than 90% of the model parameters, and hence require compression for their deployment on resource constrained devices. Generic model compression approaches such as quantization (Hubara et al., 2017) are ineffective for compressing large word-embeddings, as they do not achieve the required performance at high compression rates. Prior approaches for word-embedding compression (Raunak, 2017; Shu and Nakayama, 2017) tackle comparatively smaller vocabulary sizes and are typically post-processing approaches, where compression is performed after the downstream task models are trained. Post-processing compression for large vocabulary sizes is not effective as the compression is lossy and task-agnostic. Under higher compression rates, post-processing word embedding compression can lead to a significant degradation in downstream performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 218, |
|
"text": "(NER)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 359, |
|
"text": "(Kim et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 384, |
|
"text": "Hakkani-Tr et al., 2016;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 405, |
|
"text": "Liu and Lane, 2016a)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 696, |
|
"text": "(Hubara et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 875, |
|
"end": 889, |
|
"text": "(Raunak, 2017;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 890, |
|
"end": 913, |
|
"text": "Shu and Nakayama, 2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present a principled approach for compressing neural models targeted to perform NLU on resource-constrained devices. We tackle a large number of intents and huge vocabularies (\u223c 200K) , which are typical in a large-scale, commercial NLU system. To overcome the limitations of prior task-agnostic embedding compression approaches, we propose an end-to-end compression technique, where the compression layers are jointly trained with the downstream task (NLU) model. Joint training allows for both task-aware compression and compression-aware task learning. Task-aware compression enables the compression model to learn better reconstructions for words that are more important to the downstream task. At the same time, compression-aware task learning enables the downstream task model to adapt itself to the errors in embedding reconstructions. We further combine word embedding compression with recurrent layer compression using quantization to compress our model to just a few MB, achieving a compression rate >97% with <4% drop in predictive performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 201, |
|
"text": "(\u223c 200K)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Joint Modeling and Multi-Tasking for NLU: Joint modeling of component NLU tasks, such as IC and NER, has been an extensive area of research. Jeong and Lee (2008) propose a triangular conditional random field (CRF) as a unified probabilistic model combining IC and NER. This is further extended by Xu and Sarikaya (2013) , where convolutional neural network based triangular CRFs are used. Other neural network architectures like recursive neural networks (RNNs) (Guo et al., 2014) and their variants (Zhang and Wang, 2016; Liu and Lane, 2016a; Hakkani-Tr et al., 2016; Liu and Lane, 2016b) have also been well explored. However, all these approaches propose to build domain specific models and produce multiple models, one for each domain. Work by Kim et al. (2017) explores a unified, multi-domain, multi-task neural model using RNNs (MT-RNN) and was shown to be effective in sharing knowlege across the component tasks and domains. In contrast, the authors in (Hakkani-Tr et al., 2016 ) use a sequence-to-sequence model to output the complete semantic interpretation of an utterance (DC, IC, NER). In our work, we adapt the multi-task architecture from Kim et al. (2017) , and demonstrate its effectiveness in meeting strict device constraints on compression.", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 161, |
|
"text": "Jeong and Lee (2008)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 319, |
|
"text": "Xu and Sarikaya (2013)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 480, |
|
"text": "(Guo et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 522, |
|
"text": "(Zhang and Wang, 2016;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 523, |
|
"end": 543, |
|
"text": "Liu and Lane, 2016a;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 568, |
|
"text": "Hakkani-Tr et al., 2016;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 589, |
|
"text": "Liu and Lane, 2016b)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 765, |
|
"text": "Kim et al. (2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 962, |
|
"end": 986, |
|
"text": "(Hakkani-Tr et al., 2016", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1155, |
|
"end": 1172, |
|
"text": "Kim et al. (2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Neural Model Compression: Due to its many practical applications, research on neural model compression has received massive interest in recent years. Existing approaches for general neural model compression include low-precision computation (Vanhoucke et al., 2011; Hwang and Sung, 2014; Anwar et al., 2015) , quantization (Chen et al., 2015; Zhou et al., 2017) , network pruning (Wen et al., 2016; Han et al., 2015) , SVD-based weight matrix decomposition (Xue et al., 2013) and knowledge distillation (Hinton et al., 2015) . For neural NLP models, however, larger focus has been on compressing huge word embedding matrices. Embedding compression approaches include quantization (Hubara et al., 2017) , binarization (Tissier et al., 2019) , dimensionality reduction and matrix factorization methods such as PCA (Raunak, 2017) and SVD (Acharya et al., 2019 ). An alternative post-training compression approach using deep compositional code learning (DCCL) was also proposed by Shu and Nakayama (2017) . This approach learns compressed embedding representations based on additive quantization (Babenko and Lempitsky, 2014) and forms the basis of our task-aware compression approach. In contrast to Shu and Nakayama (2017), we propose a task-aware compression approach, where embedding compression is performed during the task model training, instead of as a post-processing step. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 265, |
|
"text": "(Vanhoucke et al., 2011;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 287, |
|
"text": "Hwang and Sung, 2014;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 307, |
|
"text": "Anwar et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 342, |
|
"text": "(Chen et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 361, |
|
"text": "Zhou et al., 2017)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 398, |
|
"text": "(Wen et al., 2016;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 416, |
|
"text": "Han et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 475, |
|
"text": "(Xue et al., 2013)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 524, |
|
"text": "(Hinton et al., 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 701, |
|
"text": "(Hubara et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 717, |
|
"end": 739, |
|
"text": "(Tissier et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 856, |
|
"text": "(Acharya et al., 2019", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 977, |
|
"end": 1000, |
|
"text": "Shu and Nakayama (2017)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1092, |
|
"end": 1121, |
|
"text": "(Babenko and Lempitsky, 2014)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Problem Setup: NLU consists of three component tasks -Domain Classification (DC), Intent Classification (IC) and Named Entity Recognition (NER). DC and IC are sentence classification tasks and determine the domain (e.g. Music) and the intent (e.g. PlayMusic) of the input utterance. NER is a sequence tagging task, where each word in the utterance is assigned a slot tag (e.g. AlbumName, Song-Name etc). The combination of the domain, intent and slots represents the semantic interpretation for the given utterance and is passed on to the downstream application. Our goal is to compress the NLU models, to fit within extreme disk space constraints with minimal degradation in predictive performance. Furthermore, low-latency and inference support for the models are desirable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Model Architectural Constraints: Our choice of a suitable on-device NLU architecture is largely driven by hardware resource constraints. First, on-device systems come with a strict memory budget, restricting our choices to architectures with fewer parameters. Second, the architectures chosen should not only be amenable to model compression, but should result in minimal degradation in performance on compression. Third, on-device models have rigorous latency targets, requiring fast inference. This restricts our choices to simpler, seasoned architectures, like LSTMs and GRUs, that require fewer layers and FLOPs as opposed to the newer computationally intensive transformer-based architectures like BERT. Moreover, on-device inference engines often lack support for sophisticated layers such as self-attention layers. Driven by these constraints and relying on the considerable effectiveness of recurrent architectures (Hakkani-Tr et al., 2016; Liu and Lane, 2016a; Zhang and Wang, 2016), we use a multi-domain, multi-task RNN model (MT-RNN), built using bi-directional LSTMs (Figure 1 ) for performing NLU. We train a single neural model that can jointly perform DC, IC and NER for a given input utterance. Furthermore, in order to reduce inference latency, we use word-level LSTMs as opposed to character or sub-word based models. Architecture Details -Our task model, which we call the MT-RNN model, is shown in Figure 1 . It consists of a shared bi-directional LSTM (Bi-LSTM) to extract features shared by all tasks, and taskspecific layers for the classification and tagging tasks. The input to the recurrent layers are pretrained embeddings and are fine-tuned during training. The input to each of the classification components is a sentence representation, obtained by concatenating the final states of the forward-and the backward-LSTM. This is passed on to a fully-connected dense layer with a softmax to predict the domain and intent for the utterance. The tagging layer produces a slot tag for each word in the utterance. The input at each time step consists of the forward-and backward-LSTM states for each word and the output is the slot tag. We choose the popularly used Conditional Random Fields (CRF) layer for NER. The network is trained to minimize a joint NLU loss defined as the sum of the cross-entropy losses for IC and DC and the CRF loss for NER:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1080, |
|
"end": 1089, |
|
"text": "(Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1419, |
|
"end": 1427, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "NLU Task Model Architecture", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "L N LU = L DC + L IC + L N ER", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NLU Task Model Architecture", |
|
"sec_num": "3.1" |
|
}, |
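To make the joint training objective concrete, the sketch below shows a minimal PyTorch-style MT-RNN: a shared Bi-LSTM, sentence-level heads for DC and IC over the concatenated final states, and a per-token tagging head. The module name `MTRNN`, all layer sizes, and the use of a plain token-level cross-entropy in place of the CRF loss are illustrative assumptions, not the authors' implementation.

```python
# Minimal sketch (assumption: PyTorch; a token-level cross-entropy stands in
# for the CRF loss used in the paper, and all sizes are illustrative).
import torch
import torch.nn as nn

class MTRNN(nn.Module):
    def __init__(self, vocab_size, emb_dim, hidden, n_domains, n_intents, n_slots):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)          # pretrained, fine-tuned
        self.lstm = nn.LSTM(emb_dim, hidden, batch_first=True,
                            bidirectional=True)               # shared Bi-LSTM
        self.dc_head = nn.Linear(2 * hidden, n_domains)       # domain classifier
        self.ic_head = nn.Linear(2 * hidden, n_intents)       # intent classifier
        self.ner_head = nn.Linear(2 * hidden, n_slots)        # per-token slot tagger

    def forward(self, tokens):
        h, (h_n, _) = self.lstm(self.emb(tokens))             # h: (B, L, 2H)
        sent = torch.cat([h_n[0], h_n[1]], dim=-1)            # final fwd/bwd states
        return self.dc_head(sent), self.ic_head(sent), self.ner_head(h)

def joint_nlu_loss(model, tokens, domain, intent, slots):
    """L_NLU = L_DC + L_IC + L_NER (cross-entropy surrogate for the CRF term)."""
    dc, ic, ner = model(tokens)
    ce = nn.functional.cross_entropy
    l_ner = ce(ner.reshape(-1, ner.size(-1)), slots.reshape(-1))
    return ce(dc, domain) + ce(ic, intent) + l_ner
```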
|
{ |
|
"text": "In the following sections, we describe our approach for compressing the word embeddings and the recurrent components of our MT-RNN model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NLU Task Model Architecture", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Word embeddings have been shown to be the largest components in an NLP model, owing to large vocabulary sizes and floating point parameters, accounting for >90% of the model sizes (Shu and Nakayama, 2017) . Hence, compressing embeddings is crucial for reducing NLP model sizes. Our approach is based on additive quantization (Babenko and Lempitsky, 2014) , which has shown great success in compressing word embeddings, achieving high compression rates (Shu and Nakayama, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 204, |
|
"text": "(Shu and Nakayama, 2017)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 354, |
|
"text": "(Babenko and Lempitsky, 2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 476, |
|
"text": "(Shu and Nakayama, 2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embedding Compression", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Additive quantization (Babenko and Lempitsky, 2014) aims to approximate vectors by representing them as a sum of basis vectors, called codewords. Originally proposed for image compression and approximate nearest neighbor search, this method has recently been used for post-processing word embedding compression (Chen et al., 2018; Shu and Nakayama, 2017) achieving high compression rates, upwards of 90%, on modest vocabulary sizes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 51, |
|
"text": "(Babenko and Lempitsky, 2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 330, |
|
"text": "(Chen et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 354, |
|
"text": "Shu and Nakayama, 2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Let W \u2208 R V \u00d7D be the original word embedding matrix, where V denotes the vocabulary size and D denotes the embedding size. Using additive quantization, the original word embedding matrix is compressed into a matrix of integer codes as W c \u2208 Z K V \u00d7M , where Z K denotes the set of integers from 1 to K,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Z K = {1, 2, . . . , K}. This is achieved using a set of M codebooks, C 1 through C M , C m \u2208 R K\u00d7D , each containing K codewords of size D. C k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "m is the k th codeword in the m th codebook. For each word embedding w i in W , the compressed codes can be w ci , where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "w ci = [z i 1 , z i 2 , . . . , z i M ] where z i m \u2208 Z K , \u2200m \u2208 {1, 2, . . . , M }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "The original word embedding w i is approximated from the codes and codebooks as w i by summing the (z i m ) th codeword in the m th codebook over all codebooks: Shu and Nakayama (2017) propose the deep compositional code learning (DCCL) architecture to learn discrete codes and codebooks for a given word embedding matrix through an unsupervised autoencoding task. In this model, a continuous word vector input, w i \u2208 R D is first projected into a lower dimensional space using a linear transformation. This is projected through a second linear layer into M different K-dimensional vectors. Each of these M vectors is passed through a gumbel-softmax activation to get M one-hot vectors, r i m \u2208 R 1\u00d7K :", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 184, |
|
"text": "Shu and Nakayama (2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "w i = M m=1 C z i m m", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "r i m = \u03c3 G (f L (w i )) \u2200m \u2208 {1, 2, . . . , M }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "where f L denotes the linear transformations and \u03c3 G denotes the gumbel-softmax activation. The gumbel-softmax activation allows the network to learn discrete codes via gumbel-sampling, while also making the network differentiable, enabling the backpropagation of gradients (Jang et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 293, |
|
"text": "(Jang et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "These one-hot vectors are converted to integer codes corresponding to the input word embedding. In order to reconstruct the word embedding, the following operations are performed: V , the size of the codes and codebooks can be greatly reduced when compared to the original embedding matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "w i = M m=1 r i m * C m where r i m \u2208 R 1\u00d7K , C m \u2208 R K\u00d7D , w i \u2208 R 1\u00d7D (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additive Quantization using Deep Compositional Code Learning", |
|
"sec_num": "3.2.1" |
|
}, |
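A minimal sketch of the code-learning layers described above, assuming PyTorch: each word vector is projected to M groups of K logits, discretized with the Gumbel-softmax, and reconstructed as a sum over M codebooks as in Equation (1). The module name `DCCLCompressor`, the hidden size and default M, K values are illustrative assumptions, not the paper's exact configuration.

```python
# Minimal sketch of DCCL-style additive quantization (assumption: PyTorch;
# module name, hidden size and default hyperparameters are illustrative).
import torch
import torch.nn as nn
import torch.nn.functional as F

class DCCLCompressor(nn.Module):
    def __init__(self, emb_dim, M=32, K=16, hidden=256, tau=1.0):
        super().__init__()
        self.M, self.K, self.tau = M, K, tau
        self.encoder = nn.Sequential(nn.Linear(emb_dim, hidden), nn.Tanh(),
                                     nn.Linear(hidden, M * K))    # f_L
        self.codebooks = nn.Parameter(torch.randn(M, K, emb_dim)) # C_1 .. C_M

    def forward(self, w):                        # w: (N, D) original embeddings
        logits = self.encoder(w).view(-1, self.M, self.K)
        # sigma_G: differentiable one-hot codes via Gumbel-softmax sampling
        r = F.gumbel_softmax(logits, tau=self.tau, hard=True)     # (N, M, K)
        # Equation (1): w_hat = sum_m r_m @ C_m
        w_hat = torch.einsum('nmk,mkd->nd', r, self.codebooks)
        return w_hat, r.argmax(-1)               # reconstruction + integer codes
```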
|
{ |
|
"text": "Shu and Nakayama (2017) propose to use the DCCL architecture to perform post-processing embedding compression, where embeddings are compressed after the downstream task model has been trained. The task model is first initialized with pretrained word embeddings that are fine-tuned during task model training to obtain task-specific embeddings. These are compressed using the DCCL architecture trained on an unsupervised autoencoding task. The input to the autoencoder is the embedding matrix W \u2208 R V \u00d7D and the model is trained to minimize the average embedding reconstruction loss (denoted by l(W, W )) for words in the embedding matrix:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-agnostic Post-Processing Compression", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "l(W, W ) = 1 V V i=1 (w i \u2212 w i ) 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-agnostic Post-Processing Compression", |
|
"sec_num": "3.2.2" |
|
}, |
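For the task-agnostic setting just described, the short sketch below fits the compressor to a fixed, task-trained embedding matrix by minimizing the mean squared reconstruction error l(W, W-hat). It reuses the hypothetical `DCCLCompressor` from the previous sketch; the epoch count, learning rate and batch size are illustrative assumptions.

```python
# Task-agnostic post-processing compression (sketch, assumption: PyTorch;
# reuses the hypothetical DCCLCompressor defined in the earlier sketch).
import torch

def compress_post_hoc(W, compressor, epochs=300, lr=1e-4, batch_size=1024):
    """Fit codes/codebooks to a fixed, task-trained embedding matrix W (V x D)."""
    opt = torch.optim.Adam(compressor.parameters(), lr=lr)
    for _ in range(epochs):
        for i in range(0, W.size(0), batch_size):
            w = W[i:i + batch_size]
            w_hat, _ = compressor(w)
            loss = ((w - w_hat) ** 2).mean()   # l(W, W_hat): all words weighted equally
            opt.zero_grad()
            loss.backward()
            opt.step()
    return compressor
```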
|
{ |
|
"text": "DCCL is shown to outperform other approaches such as parameter pruning and product quantization on sentence classification and machine translation tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-agnostic Post-Processing Compression", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Since compression is performed as a post-processing step after the task model is trained, the compression algorithm has no information about the downstream task, making the compression task-agnostic and results in several drawbacks. First, unsupervised post-processing compression treats all words equally for compression. However, in practice, some words may be more important than others for the downstream task. Hence, better reconstructions of more important words may benefit the downstream task. Second, post-processing compression typically is lossy resulting in a degradation in downstream performance since the task model is not adapted to the compression error. We propose a task-aware end-to-end compression approach which aims to address these issues. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-agnostic Post-Processing Compression", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Our algorithm improves on the above said approach, by training the DCCL a.k.a. the compression model, jointly with the downstream task model (Figure 3) . End-to-end training allows the compression model to receive signals about the downstream task, thus adapting the compression to the downstream task. Intuitively, since the compression model now has the information about how the words are used in the downstream task (via the downstream loss), it can spend more network capacity in achieving better reconstructions for more important words. At the same time, the downstream task model also adapts to the lossy reconstructions learned by the compression model, thus improving on the downstream performance. We call this task-aware end-to-end compression, where the compression algorithm takes the downstream task loss into account during embedding compression.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 151, |
|
"text": "(Figure 3)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task-aware End-to-End Compression", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "In order to perform task-aware compression with a DCCL model, we replace the original embedding lookup operations in the task model with layers from the DCCL model a.k.a. the compression layers. The input to our model is now a sequence of L word embeddings corresponding to words from the input text utterances. These are passed through the compression layers and are reconstructed, as shown in equation 1, to obtain a sequence of D dimensional word representations corresponding to each word in the input. The word representation is then fed to the recurrent layers in the task model and the remaining network is unchanged. The entire setup is trained end-to-end to minimize the downstream task loss and the gradients are back-propagated through the entire network, including the compression layers. Further, the compression layers can be initialized with pretrained model parameters from the task-agnostic DCCL model, and the NLU layers can be initialized from a trained NLU model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-aware End-to-End Compression", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "Training an end-to-end DCCL model is tricky, especially when the number and size of codebooks is large. The stochasticity introduced by gumbel-sampling can easily stray off the training, leading to sub-optimal convergence. For these cases, we ground the training by adding the word embedding reconstruction loss to the downstream task loss as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-aware End-to-End Compression", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "L = L N LU + L e where L e = 1 N N i=1 w i \u2212 w i 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-aware End-to-End Compression", |
|
"sec_num": "3.2.3" |
|
}, |
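A sketch of the combined objective above, again assuming PyTorch and building on the hypothetical `MTRNN` and `DCCLCompressor` modules from the earlier sketches: embeddings for the words in the current batch are reconstructed through the compression layers, fed to the task layers in place of the raw lookup, and the NLU loss is grounded with the batch-level reconstruction term L_e.

```python
# Task-aware end-to-end objective L = L_NLU + L_e (sketch; the task_model and
# compressor arguments are the hypothetical MTRNN and DCCLCompressor above).
import torch
import torch.nn as nn

def task_aware_loss(task_model, compressor, tokens, domain, intent, slots):
    w = task_model.emb(tokens)                    # (B, L, D) embeddings for this batch
    w_hat, _ = compressor(w.reshape(-1, w.size(-1)))
    w_hat = w_hat.view_as(w)
    # Run the task layers on the reconstructed embeddings instead of the raw lookup.
    h, (h_n, _) = task_model.lstm(w_hat)
    sent = torch.cat([h_n[0], h_n[1]], dim=-1)
    ce = nn.functional.cross_entropy
    l_nlu = (ce(task_model.dc_head(sent), domain)
             + ce(task_model.ic_head(sent), intent)
             + ce(task_model.ner_head(h).reshape(-1, task_model.ner_head.out_features),
                  slots.reshape(-1)))
    # L_e: reconstruction error over only the words appearing in this batch.
    l_e = ((w.detach() - w_hat) ** 2).mean()
    return l_nlu + l_e
```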
|
{ |
|
"text": "Adding the embedding reconstruction loss not only stabilizes the training, but also provides stronger gradients to the compression layers. Note that unlike task-agnostic compression where all words are treated equally for compression, the embedding reconstruction loss term in task-aware compression considers only the words appearing the in the input batch. This ensures that the words that are more frequent in the training data have better reconstructions, resulting in better downstream performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-aware End-to-End Compression", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "Quantization (Hubara et al., 2017 ) is a simple and effective technique for model compression. Quantization maps each floating point model paramater to its closest representative from a pre-chosen set of floating-point values. More concretely, the model parameter range is divided into B equally spaced bins (or buckets), and each parameter is assigned its closest bin. The bins can be represented by integer indices and require at most log 2 B bits. For instance, with 256 bins, a 32-bit floating point parameter can represented by an integer bin index occupying just 8 bits. We apply post-training 8-bit linear quantization to quantize the recurrent layers of the model. Since 32-bit floating point model parameters are now represented by 8-bit integers, this results in an instant 4\u00d7 compression. Furthermore, quantization improves model latency, as all the floating point operations are performed using integers. While more sophisticated compression techniques exist for compressing recurrent layers, we found that quantization was extremely effective and resulted in no degradation in performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 33, |
|
"text": "(Hubara et al., 2017", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recurrent Layer Compression", |
|
"sec_num": "3.3" |
|
}, |
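As a concrete illustration of the bucketing arithmetic described above, the sketch below performs post-training 8-bit linear (uniform) quantization of a weight tensor: parameters are mapped to 256 equally spaced bins over their range, stored as uint8 indices, and dequantized back for inference. This is a generic sketch, not the specific quantization kernel used by the on-device inference engine.

```python
# Post-training 8-bit linear quantization (generic sketch with B = 256 bins;
# not a specific inference-engine implementation).
import numpy as np

def quantize_8bit(w: np.ndarray):
    lo, hi = float(w.min()), float(w.max())
    scale = (hi - lo) / 255.0 or 1.0                    # width of each of the 256 bins
    q = np.round((w - lo) / scale).astype(np.uint8)     # 8-bit bin indices
    return q, scale, lo

def dequantize_8bit(q, scale, lo):
    return q.astype(np.float32) * scale + lo            # representative value per bin

w = np.random.randn(400, 400).astype(np.float32)        # e.g. an LSTM weight matrix
q, scale, lo = quantize_8bit(w)
w_hat = dequantize_8bit(q, scale, lo)
print(q.nbytes / w.nbytes)                              # 0.25 -> the instant 4x compression
```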
|
{ |
|
"text": "In this section we describe the datasets used and our experimental setup for model compression. While our approach is generically applicable to any NLP task that uses word embeddings, we show the effectiveness of our approach on the three NLU tasks -DC, IC, and NER. We show our results on a large scale commercial NLU system trained across a large number of intents with huge vocabularies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Dataset. We use annotated live traffic data of a large-scale, cloud based, commercial VA system to train our NLU models. Utterances from the live traffic are randomly sampled and anonymized to remove any customer specific information. They are then annotated by skilled annotators for the NLU labels corresponding to the domain, intent and slot labels for each utterance. The training set chosen for our experiments contains millions of utterances spanning 5 domains, and over 150 intents and slots. One of these domains is the 'Out of domain' (or OOD) domain, consisting of utterances not supported by the NLU system. The intent for these utterances is labeled as the 'OODIntent' and the words are given the 'Other' slot tag. Our held-out test set is prepared by randomly sampling 1 million utterances from the live-production traffic, following a similar process. In order to facilitate optimization and early stopping, we also use a validation set of a similar scale.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Evaluation Metrics. We use the following metrics for evaluating the performance on the NLU tasks:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Intent Recognition Error Rate (IRER): This is the ratio of number of incorrect interpretations to the total number of utterances. A correct interpretation is when the predicted domain, intent and all slots for an utterances are correct. We compute the IRER only on non-OOD utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Intent Classification Error Rate (ICER): This is the ratio of number of incorrect intent predictions to the total number of utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Domain Classification Error Rate (DCER): This is the ratio of number of incorrect domain predictions to the total number of utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Slot Error Rate (SER): This is the ratio of number of incorrect slot predictions to the total number of slots.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "False Accept Rate (FAR): This is the ratio of number of out-of-domain utterances falsely accepted as a supported utterance to the total number of out-of-domain utterances. This metric is mainly used to evaluate the effectiveness of the model in rejecting out-of-domain (or unsupported) utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Along with the above metrics we also compute the sizes of the word embeddings and the MT-RNN task model. We only report relative changes in the above metrics compared to the baseline. NLU Model Training. We train the NLU task model (the MT-RNN model) described in Section 3.1 using the prepared training dataset (Section 4). We initialize the embeddings with FastText (Joulin et al., 2016) embeddings that have been pretrained on a large corpus of unannotated, anonymized, live utterances. The model is trained to minimize the NLU loss L N LU as described in Section 3.1 and the embeddings are fine-tuned during training. The models are trained for a total of 25 epochs, with early stopping on the validation loss, using Adam optimizer with a learning rate of 0.0001. We further perform a grid search on a range of hyperparameter values for dropout and variational dropout and select the best performing model as our candidate model for compression. This model also serves as our uncompressed baseline. Baselines. We compare our proposed approach with the following baselines. We use the abbreviations 'TAg.' for 'Task Agnostic' and 'TAw.' for 'Task Aware'.", |
|
"cite_spans": [ |
|
{ |
|
"start": 368, |
|
"end": 389, |
|
"text": "(Joulin et al., 2016)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "TAg. SVD: In this approach, large embedding matrices are factorized into matrices of much smaller sizes to produce low-rank approximations of the original embedding matrix, using Singular Value Decomposition (SVD). This is applied as an offline compression method where the embedding matrices are compressed as a post-processing step.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "TAw. SVD: Acharya et al. 2019propose a task-aware SVD-based embedding compression approach, where the embedding matrix is first factorized into lower dimensional matrices using SVD. The factors are then used to initialize a smaller word embedding layer followed by a linear layer, and jointly finetuned with the downstream task model. Stochastic Gradient Descent (SGD) with a learning rate of 0.001 as presented in Acharya et al. (2019) is used for the optimizer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 415, |
|
"end": 436, |
|
"text": "Acharya et al. (2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "TAg. DCCL: Task-agnostic compression method proposed by Shu and Nakayama (2017) where the code learning autoencoder described in Section 3.2 is used to compress word-embeddings from the trained NLU model. Since it does not perform joint training of the compression layers with the downstream task, this serves as an ablation test for our proposed task-aware compression approach.", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 79, |
|
"text": "Shu and Nakayama (2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "TAg. DCCL + NLU Finetuning: This is another ablation test for our proposed task-aware compression approach. In this approach, task-agnostic compression is performed as in the previous baseline. Once compressed in a task-agnostic way, the embeddings are kept frozen and the downstream task model is fine-tuned to minimize the downstream NLU loss. NLU model fine-tuning is performed with a learning rate of 0.0001 for 5 epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For all SVD-based approaches, we run experiments over a range of values for n where n is the fraction of components retrained in the low-rank SVD approximation. This produces models of different sizes. For all DCCL-based baselines, we train the task-agnostic autoencoder model for 300 epochs (approximately 800k iterations) with a learning rate of 0.0001 using the Adam optimizer. We experiment with a range of values for hyperparameters M and K where M is the number of codebooks and K is the number of basis vectors per codebook. Different values of M and K produce models of different sizes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Implementation details. Our approach is essentially a task-aware version of DCCL (TAw. DCCL). In our method, the compression layers are initialized with the parameters from the trained autoencoder model, obtained as a result of task-agnostic post-processing compression. Similarly, the NLU specific layers are initialized from the trained NLU model. The entire compression model is then trained end-toend to minimize the loss function as mentioned in Section 3.2.3. The model is trained with a learning rate of 0.0001 for 5 epochs. Similar to the above task-agnostic setups, we experiment with a range of values for M and K. We further explore the following additional setups:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Without pretraining: In this setup, the compression layers and the task model are jointly trained from scratch and are not initialized from pretrained components. The model is trained to minimize the joint NLU loss without the embedding reconstruction loss. We use the Adam optimizer with a learning rate Without embedding reconstruction loss: In this approach, we do not add the embedding reconstruction loss to the downstream task loss. The models are, however, initialized from pretrained components, and trained end-to-end for 5 epochs. Table 1 summarizes the impact of various word embedding compression approaches on the downstream IRER metric for a range of compression rates. Compression rate is determined by dividing the uncompressed embedding (or model) size by the compressed embedding (or model) size. We report percentage relative changes 2 to the IRER when compared to the uncompressed baseline. The results presented are for 300 dimensional embeddings. However, similar trends were observed for 100 dimensional embeddings as well.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 541, |
|
"end": 548, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
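To illustrate how the reported compression rates arise, the short calculation below estimates the storage of an uncompressed fp32 embedding table against DCCL codes plus codebooks. The vocabulary size, D, M and K values are illustrative assumptions, not the exact configurations used in the paper.

```python
# Back-of-the-envelope compression-rate arithmetic (illustrative numbers only).
import math

V, D = 200_000, 300            # vocabulary size, embedding dimension (assumed)
M, K = 32, 16                  # number of codebooks, codewords per codebook (assumed)

uncompressed = V * D * 4                          # fp32 embedding matrix, in bytes
codes = V * M * math.log2(K) / 8                  # M * log2(K) bits per word
codebooks = M * K * D * 4                         # fp32 codebook parameters
compressed = codes + codebooks

print(f"uncompressed: {uncompressed / 1e6:.1f} MB")   # ~240 MB
print(f"compressed:   {compressed / 1e6:.1f} MB")     # a few MB
print(f"rate: {uncompressed / compressed:.1f}x")      # on the order of 60x
```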
|
{ |
|
"text": "In general, we find that task-aware approaches perform better than task-agnostic post-processing approaches. This is because the task-aware end-to-end compression tunes the compression to the downstream task, while also adjusting the task model parameters to recover performance due to lossy reconstructions. From Table 1 we also find that for any given compression rate, our proposed task-aware DCCL approach has the least degradation in predictive performance when compared to other methods.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 321, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Task-aware DCCL outperforms even the best task-agnostic compression baseline (TAg. DCCL + NLU Fine-tuning) by 39-44% at each of the different compression rates. This shows that the loss signal from the downstream task helps performance by not only adapting the task model to the compression, but also by improving compression quality. Moreover, our model at 120\u00d7 compression rate performs better than the best baseline even at 60\u00d7 compression rate. In other words, our models are 2\u00d7 smaller than even the best baseline for a similar performance. We also find that the embedding reconstruction loss added to the downstream task loss helps improve the downstream performance, especially when the compression rate is lower i.e. when the gumbel-sampling layers are larger or more in number.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In order to understand the importance of task-aware compression, we plot the word embedding reconstruction loss (Figure 4) for the top most frequent words in our dataset. As seen in Figure 4 , the average reconstruction loss for task-agnostic DCCL remains approximately constant irrespective of frequency of the words, indicating that all words are treated equally. In contrast, task-aware compression reduces the average reconstruction loss for more frequent words indicating that the network capacity is spent to learn better reconstructions for words more important for the downstream task. Note that the model used for the graph is the task-aware DCCL model without the reconstruction loss term.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 122, |
|
"text": "(Figure 4)", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 190, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We also find that DCCL-based approaches consistently performed better than their SVD counterparts, in both task-aware and task-agnostic variants. SVD-based approaches do not perform well beyond a specific compression rate (+7.99% for 1.7\u00d7 compression). On investigating, we found that word embeddings were full rank matrices, with high singular values for all components, indicating that these components captured high variance. Table 2 presents a summary of the performance of the best models for each of the approaches at around 60\u00d7 embedding compression rate. 8-bit Bi-LSTM quantization helps reduce the size of the recurrent layers in the models, resulting in a net model compression ratio of 39.5\u00d7 with a minimal performance degradation of 3.69% when compared to the uncompressed baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 429, |
|
"end": 436, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we present approaches for extreme model compression for performing natural language understanding on resource-constrained device. We use a unified multi-domain, multi-task neural model that performs DC, IC and NER for all supported domains. We discuss model compression approaches to compress the bulkiest components of our models -the word embeddings, and propose a task-aware end-to-end compression method based on deep compositional code learning where we jointly train the compression layers with the downstream task. This approach reduced word embeddings sizes to just a few MB, achieving a word-embedding compression rate of 98.4% and outperforms all other taskagnostic and task-aware embedding compression baselines. We further apply post-training 8-bit linear quantization to compress the recurrent layers of the model. These approaches together result in a net model compression rate of 97.5%, with a minimal performance degradation of 3.64% when compared to the uncompressed model baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "DCCL approaches are complementary to other compression approaches such as knowledge distillation and model pruning. While our work demonstrates the effectiveness of task-aware DCCL on the classification and tagging tasks in NLU, the approach itself is generic and can be applied to other NLP tasks that rely on large word-embeddings. As part of future work, we would like to explore the effectiveness of task-aware DCCL on NLP tasks such as machine translation and language modeling. We would also like to explore compression of models with advanced architectures using contextual embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://www.statista.com/statistics/973815/worldwide-digital-voice-assistant-in-use/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Absolute numbers are not provided due to commercial confidentiality requirements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Online embedding compression for text classification using low rank matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "Anish", |
|
"middle": [], |
|
"last": "Acharya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angeliki", |
|
"middle": [], |
|
"last": "Metallinou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inderjit", |
|
"middle": [], |
|
"last": "Dhillon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "6196--6203", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anish Acharya, Rahul Goel, Angeliki Metallinou, and Inderjit Dhillon. 2019. Online embedding compression for text classification using low rank matrix factorization. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6196-6203.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Fixed point optimization of deep convolutional neural networks for object recognition", |
|
"authors": [ |
|
{ |
|
"first": "Sajid", |
|
"middle": [], |
|
"last": "Anwar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyuyeon", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonyong", |
|
"middle": [], |
|
"last": "Sung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1131--1135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajid Anwar, Kyuyeon Hwang, and Wonyong Sung. 2015. Fixed point optimization of deep convolutional neural networks for object recognition. In 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1131-1135. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Additive quantization for extreme vector compression", |
|
"authors": [ |
|
{ |
|
"first": "Artem", |
|
"middle": [], |
|
"last": "Babenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Lempitsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "931--938", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Artem Babenko and Victor Lempitsky. 2014. Additive quantization for extreme vector compression. In Proceed- ings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 931-938.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "End-toend attention-based large vocabulary speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Chorowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitriy", |
|
"middle": [], |
|
"last": "Serdyuk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "2016 IEEE international conference on acoustics, speech and signal processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4945--4949", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Jan Chorowski, Dmitriy Serdyuk, Philemon Brakel, and Yoshua Bengio. 2016. End-to- end attention-based large vocabulary speech recognition. In 2016 IEEE international conference on acoustics, speech and signal processing (ICASSP), pages 4945-4949. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Compressing neural networks with the hashing trick", |
|
"authors": [ |
|
{ |
|
"first": "Wenlin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Wilson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Tyree", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International conference on machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2285--2294", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenlin Chen, James Wilson, Stephen Tyree, Kilian Weinberger, and Yixin Chen. 2015. Compressing neural networks with the hashing trick. In International conference on machine learning, pages 2285-2294.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning k-way d-dimensional discrete codes for compact embedding representations", |
|
"authors": [ |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhou", |
|
"middle": [], |
|
"last": "Martin Renqiang Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1806.09464" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ting Chen, Martin Renqiang Min, and Yizhou Sun. 2018. Learning k-way d-dimensional discrete codes for compact embedding representations. arXiv preprint arXiv:1806.09464.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Snips voice platform: an embedded spoken language understanding system for private-by-design voice interfaces", |
|
"authors": [ |
|
{ |
|
"first": "Alice", |
|
"middle": [], |
|
"last": "Coucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alaa", |
|
"middle": [], |
|
"last": "Saade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrien", |
|
"middle": [], |
|
"last": "Ball", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Th\u00e9odore", |
|
"middle": [], |
|
"last": "Bluche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Caulier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Leroy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cl\u00e9ment", |
|
"middle": [], |
|
"last": "Doumouro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thibault", |
|
"middle": [], |
|
"last": "Gisselbrecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesco", |
|
"middle": [], |
|
"last": "Caltagirone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thibaut", |
|
"middle": [], |
|
"last": "Lavril", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.10190" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alice Coucke, Alaa Saade, Adrien Ball, Th\u00e9odore Bluche, Alexandre Caulier, David Leroy, Cl\u00e9ment Doumouro, Thibault Gisselbrecht, Francesco Caltagirone, Thibaut Lavril, et al. 2018. Snips voice platform: an embedded spoken language understanding system for private-by-design voice interfaces. arXiv preprint arXiv:1805.10190.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Speech recognition with deep recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Abdel-Rahman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "2013 IEEE international conference on acoustics, speech and signal processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6645--6649", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Graves, Abdel-rahman Mohamed, and Geoffrey Hinton. 2013. Speech recognition with deep recurrent neural networks. In 2013 IEEE international conference on acoustics, speech and signal processing, pages 6645-6649. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Joint semantic utterance classification and slot filling with recursive neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "Tur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "2014 IEEE Spoken Language Technology Workshop (SLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "554--559", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Guo, Gokhan Tur, Wen-tau Yih, and Geoffrey Zweig. 2014. Joint semantic utterance classification and slot filling with recursive neural networks. In 2014 IEEE Spoken Language Technology Workshop (SLT), pages 554-559. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Multi-domain joint semantic frame parsing using bi-directional rnn-lstm", |
|
"authors": [ |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "Tur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Nung Vivian", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye-Yi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 17th Annual Meeting of the International Speech Communication Association (INTERSPEECH 2016). ISCA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dilek Hakkani-Tr, Gokhan Tur, Asli Celikyilmaz, Yun-Nung Vivian Chen, Jianfeng Gao, Li Deng, and Ye-Yi Wang. 2016. Multi-domain joint semantic frame parsing using bi-directional rnn-lstm. In Proceedings of The 17th Annual Meeting of the International Speech Communication Association (INTERSPEECH 2016). ISCA, June.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning both weights and connections for efficient neural network", |
|
"authors": [ |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Pool", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Dally", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Song Han, Jeff Pool, John Tran, and William Dally. 2015. Learning both weights and connections for efficient neural network. In Advances in neural information processing systems, pages 1135-1143.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Dahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdel-Rahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navdeep", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Senior", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Vanhoucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tara", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Sainath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "IEEE Signal processing magazine", |
|
"volume": "29", |
|
"issue": "6", |
|
"pages": "82--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey Hinton, Li Deng, Dong Yu, George E Dahl, Abdel-rahman Mohamed, Navdeep Jaitly, Andrew Senior, Vincent Vanhoucke, Patrick Nguyen, Tara N Sainath, et al. 2012. Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups. IEEE Signal processing magazine, 29(6):82- 97.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Distilling the knowledge in a neural network", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1503.02531" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Quantized neural networks: Training neural networks with low precision weights and activations", |
|
"authors": [ |
|
{ |
|
"first": "Itay", |
|
"middle": [], |
|
"last": "Hubara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Courbariaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Soudry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ran", |
|
"middle": [], |
|
"last": "El-Yaniv", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "18", |
|
"issue": "1", |
|
"pages": "6869--6898", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Itay Hubara, Matthieu Courbariaux, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. 2017. Quantized neu- ral networks: Training neural networks with low precision weights and activations. The Journal of Machine Learning Research, 18(1):6869-6898.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Fixed-point feedforward deep neural network design using weights+ 1, 0, and-1", |
|
"authors": [ |
|
{ |
|
"first": "Kyuyeon", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonyong", |
|
"middle": [], |
|
"last": "Sung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "IEEE Workshop on Signal Processing Systems (SiPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyuyeon Hwang and Wonyong Sung. 2014. Fixed-point feedforward deep neural network design using weights+ 1, 0, and-1. In 2014 IEEE Workshop on Signal Processing Systems (SiPS), pages 1-6. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Categorical reparameterization with gumbel-softmax", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Jang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shixiang", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Poole", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.01144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Jang, Shixiang Gu, and Ben Poole. 2016. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Triangular-chain conditional random fields", |
|
"authors": [ |
|
{ |
|
"first": "Minwoo", |
|
"middle": [], |
|
"last": "Jeong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gary Geunbae", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
|
"volume": "16", |
|
"issue": "7", |
|
"pages": "1287--1302", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minwoo Jeong and Gary Geunbae Lee. 2008. Triangular-chain conditional random fields. IEEE Transactions on Audio, Speech, and Language Processing, 16(7):1287-1302.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Fasttext.zip: Compressing text classification models", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthijs", |
|
"middle": [], |
|
"last": "Douze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H\u00e9rve", |
|
"middle": [], |
|
"last": "J\u00e9gou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1612.03651" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, Matthijs Douze, H\u00e9rve J\u00e9gou, and Tomas Mikolov. 2016. Fasttext.zip: Compressing text classification models. arXiv preprint arXiv:1612.03651.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Onenet: Joint domain, intent, slot prediction for spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Young-Bum", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungjin", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Stratos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "547--553", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Young-Bum Kim, Sungjin Lee, and Karl Stratos. 2017. Onenet: Joint domain, intent, slot prediction for spoken language understanding. In 2017 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), pages 547-553. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Attention-based recurrent neural network models for joint intent detection and slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Lane", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Liu and Ian Lane. 2016a. Attention-based recurrent neural network models for joint intent detection and slot filling. Interspeech 2016, Sep.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Joint online spoken language understanding and language modeling with recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Lane", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.01462" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Liu and Ian Lane. 2016b. Joint online spoken language understanding and language modeling with recurrent neural networks. arXiv preprint arXiv:1609.01462.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Personalized speech recognition on mobile devices", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Mcgraw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rohit", |
|
"middle": [], |
|
"last": "Prabhavalkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raziel", |
|
"middle": [], |
|
"last": "Alvarez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Montse", |
|
"middle": [], |
|
"last": "Gonzalez Arenas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kanishka", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Rybach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ouais", |
|
"middle": [], |
|
"last": "Alsharif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ha\u015fim", |
|
"middle": [], |
|
"last": "Sak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gruenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fran\u00e7oise", |
|
"middle": [], |
|
"last": "Beaufays", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5955--5959", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian McGraw, Rohit Prabhavalkar, Raziel Alvarez, Montse Gonzalez Arenas, Kanishka Rao, David Rybach, Ouais Alsharif, Ha\u015fim Sak, Alexander Gruenstein, Fran\u00e7oise Beaufays, et al. 2016. Personalized speech recogni- tion on mobile devices. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5955-5959. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Acoustic modeling using deep belief networks", |
|
"authors": [ |
|
{ |
|
"first": "Abdel-Rahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Dahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "IEEE transactions on audio, speech, and language processing", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "14--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdel-rahman Mohamed, George E Dahl, and Geoffrey Hinton. 2011. Acoustic modeling using deep belief networks. IEEE transactions on audio, speech, and language processing, 20(1):14-22.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Simple and effective dimensionality reduction for word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Vikas Raunak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1708.03629" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikas Raunak. 2017. Simple and effective dimensionality reduction for word embeddings. arXiv preprint arXiv:1708.03629.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Recurrent neural network and lstm models for lexical utterance classification", |
|
"authors": [ |
|
{ |
|
"first": "Suman", |
|
"middle": [], |
|
"last": "Ravuri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suman Ravuri and Andreas Stolcke. 2015. Recurrent neural network and lstm models for lexical utterance classi- fication. In Sixteenth Annual Conference of the International Speech Communication Association.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Application of deep belief networks for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Ruhi", |
|
"middle": [], |
|
"last": "Sarikaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Deoras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "IEEE/ACM Transactions on Audio, Speech, and Language Processing", |
|
"volume": "22", |
|
"issue": "4", |
|
"pages": "778--784", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruhi Sarikaya, Geoffrey E Hinton, and Anoop Deoras. 2014. Application of deep belief networks for natural language understanding. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 22(4):778- 784.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Compressing word embeddings via deep compositional code learning", |
|
"authors": [ |
|
{ |
|
"first": "Raphael", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideki", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1711.01068" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raphael Shu and Hideki Nakayama. 2017. Compressing word embeddings via deep compositional code learning. arXiv preprint arXiv:1711.01068.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Near-lossless binarization of word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Tissier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christophe", |
|
"middle": [], |
|
"last": "Gravier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amaury", |
|
"middle": [], |
|
"last": "Habrard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "7104--7111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julien Tissier, Christophe Gravier, and Amaury Habrard. 2019. Near-lossless binarization of word embeddings. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 7104-7111.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Improving the speed of neural networks on cpus", |
|
"authors": [ |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Vanhoucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Senior", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark Z", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vincent Vanhoucke, Andrew Senior, and Mark Z Mao. 2011. Improving the speed of neural networks on cpus.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Learning structured sparsity in deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunpeng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yandan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiran", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2074--2082", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Wen, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li. 2016. Learning structured sparsity in deep neural networks. In Advances in neural information processing systems, pages 2074-2082.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Convolutional neural network based triangular crf for joint intent detection and slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Puyang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruhi", |
|
"middle": [], |
|
"last": "Sarikaya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "2013 ieee workshop on automatic speech recognition and understanding", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "78--83", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Puyang Xu and Ruhi Sarikaya. 2013. Convolutional neural network based triangular crf for joint intent detection and slot filling. In 2013 ieee workshop on automatic speech recognition and understanding, pages 78-83. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Contextual domain classification in spoken language understanding systems using recurrent neural network", |
|
"authors": [ |
|
{ |
|
"first": "Puyang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruhi", |
|
"middle": [], |
|
"last": "Sarikaya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "136--140", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Puyang Xu and Ruhi Sarikaya. 2014. Contextual domain classification in spoken language understanding sys- tems using recurrent neural network. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 136-140. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Restructuring of deep neural network acoustic models with singular value decomposition", |
|
"authors": [ |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinyu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2365--2369", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jian Xue, Jinyu Li, and Yifan Gong. 2013. Restructuring of deep neural network acoustic models with singular value decomposition. In Interspeech, pages 2365-2369.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Recurrent neural networks for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Kaisheng", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mei-Yuh", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangyang", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2524--2528", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaisheng Yao, Geoffrey Zweig, Mei-Yuh Hwang, Yangyang Shi, and Dong Yu. 2013. Recurrent neural networks for language understanding. In Interspeech, pages 2524-2528.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "A joint model of intent determination and slot filling for spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodong Zhang and Houfeng Wang. 2016. A joint model of intent determination and slot filling for spoken language understanding. In Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelli- gence, IJCAI16, page 29932999. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Incremental network quantization: Towards lossless cnns with low-precision weights", |
|
"authors": [ |
|
{ |
|
"first": "Aojun", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anbang", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiwen", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yurong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.03044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu, and Yurong Chen. 2017. Incremental network quantization: Towards lossless cnns with low-precision weights. arXiv preprint arXiv:1702.03044.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Multi-Domain, Multi-Task Recurrent Architecture for on-device NLU.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Deep Compositional Code Learning Architecture.", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "provides an overview of the DCCL model. Since the word embedding matrix W can be reconstructed using just the codes W c and the codebooks C = [C i . . . C m ], the original embedding matrix W with V \u00d7 D floating point values need not be stored on-device, thus achieving the required compression. Furthermore, W c would be an integer matrix requiring only M log 2 K bits per embedding and the codebook C requires just M * K * D * 32 bits on disk, where each floating point element takes 32 bits. By choosing M and K", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Task-aware end-to-end compression with the MT-RNN model.", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Average Reconstruction loss for top frequent words.", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Relative percentage IRER change for different word embedding compression rates.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "This table shows relative performance metrics and model sizes for different baselines and our proposed approaches. The best models in each category are highlighed in bold. of 0.0001 and train the model for 25 epochs.", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |