|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:29:15.341012Z" |
|
}, |
|
"title": "Language Scaling for Universal Suggested Replies Model", |
|
"authors": [ |
|
{ |
|
"first": "Qianlan", |
|
"middle": [], |
|
"last": "Ying", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Payal", |
|
"middle": [], |
|
"last": "Bajaj", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Bellevue", |
|
"region": "Washington", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Budhaditya", |
|
"middle": [], |
|
"last": "Deb", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Bellevue", |
|
"region": "Washington", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Bojia", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Milad", |
|
"middle": [], |
|
"last": "Shokouhi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Bellevue", |
|
"region": "Washington", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Xia", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Bellevue", |
|
"region": "Washington", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Daxin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We consider the problem of scaling automated suggested replies for Outlook email system to multiple languages. Faced with increased compute requirements and low resources for language expansion, we build a single universal model for improving the quality and reducing run-time costs of our production system. However, restricted data movement across regional centers prevents joint training across languages. To this end, we propose a multitask continual learning framework, with auxiliary tasks and language adapters to learn universal language representation across regions. The experimental results show positive crosslingual transfer across languages while reducing catastrophic forgetting across regions. Our online results on real user traffic show significant gains in CTR and characters saved, as well as 65% training cost reduction compared with per-language models. As a consequence, we have scaled the feature in multiple languages including low-resource markets.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We consider the problem of scaling automated suggested replies for Outlook email system to multiple languages. Faced with increased compute requirements and low resources for language expansion, we build a single universal model for improving the quality and reducing run-time costs of our production system. However, restricted data movement across regional centers prevents joint training across languages. To this end, we propose a multitask continual learning framework, with auxiliary tasks and language adapters to learn universal language representation across regions. The experimental results show positive crosslingual transfer across languages while reducing catastrophic forgetting across regions. Our online results on real user traffic show significant gains in CTR and characters saved, as well as 65% training cost reduction compared with per-language models. As a consequence, we have scaled the feature in multiple languages including low-resource markets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automated suggested replies or smart replies (SR) assist users to quickly respond with a short, generic, and relevant response, without users having to type in the reply. SR is an increasingly popular feature in many commercial applications such as Gmail, Outlook, Skype, Facebook Messenger, Microsoft Teams, and Uber (Kannan et al., 2016; Henderson et al., 2017a; Shang et al., 2015; Deb et al., 2019; Yue Weng, 2019) . While the initial versions of this feature mostly targeted English users, making it available in multiple languages and markets is important not only from the perspective of product expansion but also from a linguistic inclusivity point of view.", |
|
"cite_spans": [ |
|
{ |
|
"start": 318, |
|
"end": 339, |
|
"text": "(Kannan et al., 2016;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 364, |
|
"text": "Henderson et al., 2017a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 384, |
|
"text": "Shang et al., 2015;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 402, |
|
"text": "Deb et al., 2019;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 403, |
|
"end": 418, |
|
"text": "Yue Weng, 2019)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper we consider the problem of rapid scaling of the SR feature to multiple languages for Outlook. To develop such a system at production scale, we are faced with the following challenges.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "-Model management: Language scaling increases the effort of training, deploying, and managing per-language models, which needs to be replicated for each language. In addition, one model per language increases the storage and compute requirements for the production servers, which can increase costs and occurrences of run-time issues.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "-Data constraints: Developing models at production quality requires considerable effort in data collection and management. Due to regional market share and infrastructure constraints, rich and domain-specific data may not be available for all languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "-Data privacy and security policies: Regional policies enforce data to be located in corresponding regions. For example, Spanish and Portuguese data are stored in North American (NAM) clusters while French data is stored in European (EUR) clusters. Data movement across regions is not allowed and this prevents leveraging commonly used multi-lingual co-training methods which require all the data stored to be in the same place.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To reduce the cost of model management, we propose to build a single universal SR model, capable of serving multiple languages and markets. To overcome data constraints, we propose to use augmentation with machine-translated (MT) data for languages without supervised data. To overcome privacy constraints, we propose a continual learning framework, where the model is trained sequentially across regions. To alleviate catastrophic forgetting (French, 1999; McCloskey and Cohen, 1989) in the continual learning process, we reinforce the universal properties via multi-task learning approach with public task-agnostic data, and an adapter-based model architecture that leverages domain-specific SR data and MT data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 443, |
|
"end": 457, |
|
"text": "(French, 1999;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 484, |
|
"text": "McCloskey and Cohen, 1989)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our experimental results followed with improvements shown on real user traffic illustrate the ef-fectiveness of the approach. As a consequence, we have rapidly scaled the feature in several languages including low-resource markets. Multilingual training for universal models is often very tricky to work in practice (especially with our data constraints). Thus, we demonstrate a significant accomplishment of a multi-lingual SR system running at production scale on millions of users, which saves resources while improving performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The SR feature is similar to open-domain chatbots and task-oriented conversational agents, Henderson et al., 2019b; Fadhil and Schiavo, 2019; Xu et al., 2017; Okuda and Shoda, 2018; Kopp et al., 2018) . In terms of usage, SR is closer to the latter, in that it assists users to complete a reply, instead of continuing an openended dialog. Following commonly used IR-based models in commercial SR applications (Henderson et al., 2017b; Deb et al., 2019) , we use a dual encoder matching model for our SR system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 115, |
|
"text": "Henderson et al., 2019b;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 141, |
|
"text": "Fadhil and Schiavo, 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 158, |
|
"text": "Xu et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 181, |
|
"text": "Okuda and Shoda, 2018;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 200, |
|
"text": "Kopp et al., 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 434, |
|
"text": "(Henderson et al., 2017b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 452, |
|
"text": "Deb et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The matching model has two parallel encoders projecting input message and corresponding reply into a common representation space. Different encoders such as feed-forward and BiLSTM layers can be used here (Henderson et al., 2017a; Deb et al., 2019) . More recently, (Devlin et al., 2018; Liu et al., 2019; Yang et al., 2019; Henderson et al., 2019a,b) show considerable improvements with transformer-based pre-trained models. Our English SR model uses a BERT equivalent (Devlin et al., 2018) encoder, while our mono-lingual baselines in other languages use BiLSTM encoders.", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 230, |
|
"text": "(Henderson et al., 2017a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 248, |
|
"text": "Deb et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 287, |
|
"text": "(Devlin et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 305, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 324, |
|
"text": "Yang et al., 2019;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 351, |
|
"text": "Henderson et al., 2019a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 491, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The model is trained on one-on-one messagereply (m-r) pairs from commercial email data. We minimize the symmetric loss function. It is a modified softmax on dot products between m-r encoding in equation 1 where s i,j = e \u03c6(m i )\u2022\u03c6(r j ) . As described in (Deb et al., 2019), it was shown to improve the relevance by targeting at bi-directional conversational constraints.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(mi, ri) = si,i j si,j + k s k,i \u2212 si,i", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
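{

"text": "As a concrete reference, the following is a minimal PyTorch sketch of the symmetric loss in equation 1 with in-batch negatives; the random tensors stand in for the actual message and reply encoder outputs, and a production implementation would additionally work in log space for numerical stability.\n\nimport torch\n\ndef symmetric_loss(msg_emb, rep_emb):\n    # msg_emb, rep_emb: [B, D] encodings phi(m_i), phi(r_i) of paired messages and replies.\n    logits = msg_emb @ rep_emb.t()        # entry (i, j) = phi(m_i) . phi(r_j)\n    scores = logits.exp()                 # s_{i,j} = exp(phi(m_i) . phi(r_j))\n    diag = scores.diagonal()              # s_{i,i}\n    denom = scores.sum(dim=1) + scores.sum(dim=0) - diag\n    return -(diag / denom).log().mean()   # average -log p(m_i, r_i) over the batch\n\nB, D = 8, 16\nm = torch.randn(B, D)   # placeholder message-encoder outputs\nr = torch.randn(B, D)   # placeholder reply-encoder outputs\nprint(symmetric_loss(m, r))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Core SR Model",

"sec_num": "2"

},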
|
{ |
|
"text": "IR-based model requires a fixed response set. To generate that, we collect differentially private (DP) (Gopi et al., 2020) and anonymized replies, filtered for sensitive content from the training data which preserves user privacy while mining actual user responses. Furthermore, we use human curation to edit responses for cultural-sensitivity, genderneutrality, etc. DP filtration requires a large amount of data due to low yields. For low-resource markets, we translate English responses with human curation for cultural adaptation to languages and locales.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 122, |
|
"text": "(Gopi et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "During prediction, we compute the matching score (\u2022) between the message and pre-computed response set vectors. Similar to (Henderson et al., 2017a; Deb et al., 2019) , we add a language-model (LM) penalty representing the popularity of responses to bias the predictions towards more common ones. Translated responses inherit the penalty score from the corresponding English responses. Using this score in equation 2 we first select top N 1 responses, and down-select to top N 2 after deduplication using lexical clustering, before presenting to users.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 148, |
|
"text": "(Henderson et al., 2017a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 166, |
|
"text": "Deb et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Score = \u03c6(mi) \u2022 \u03c6K (r k )) + \u03b1LMK (r k )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
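{

"text": "A sketch of the prediction-time selection follows: equation 2 is applied over the pre-computed response matrix, the top N_1 = 30 responses are kept, and a down-selection to N_2 = 3 is applied; dedup_downselect is an illustrative placeholder for the actual lexical clustering step.\n\nimport torch\n\ndef rank_responses(msg_emb, resp_embs, lm_penalty, alpha=1.0, n1=30):\n    # Equation 2: Score_k = phi(m) . phi_K(r_k) + alpha * LM_K(r_k) for every candidate response.\n    scores = resp_embs @ msg_emb + alpha * lm_penalty\n    top = torch.topk(scores, k=min(n1, scores.numel()))\n    return top.indices.tolist()\n\ndef dedup_downselect(indices, responses, n2=3):\n    # Toy stand-in for lexical clustering: keep at most one response per leading token.\n    seen, kept = set(), []\n    for idx in indices:\n        key = (responses[idx].lower().split() or [''])[0]\n        if key not in seen:\n            seen.add(key)\n            kept.append(responses[idx])\n        if len(kept) == n2:\n            break\n    return kept",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Core SR Model",

"sec_num": "2"

},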
|
{ |
|
"text": "3 Universal SR Model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The universal SR model consists of parallel encoder architecture trained using symmetric loss function similar to the core SR model. We initialize the m-r encoders with InfoXLM (Chi et al., 2020) , an XLM-Roberta equivalent multi-lingual model as shown in as Figure 1 (a) which creates language-agnostic text representation across 100 languages. The encoder is pre-trained with both publicly available and internal proprietary corpora and has shown good cross-lingual transfer capabilities on benchmarks such as XNLI (Conneau et al., 2018) . Using a universal pre-trained model in itself enables language expansion. However, as we discuss next, data movement constraints made training the universal model tricky, with performance frequently worse than single mono-lingual models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 195, |
|
"text": "(Chi et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 517, |
|
"end": 539, |
|
"text": "(Conneau et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 267, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Core SR Model", |
|
"sec_num": "2" |
|
}, |
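{

"text": "A sketch of initializing the two matching-model towers from a publicly released multi-lingual checkpoint via HuggingFace Transformers is shown below; 'microsoft/infoxlm-base' is the public InfoXLM release and stands in here for the internal pre-trained encoder, and mean pooling is one simple choice of sentence representation rather than the exact production head.\n\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\nCKPT = 'microsoft/infoxlm-base'   # public stand-in for the internal pre-trained encoder\ntokenizer = AutoTokenizer.from_pretrained(CKPT)\nmsg_encoder = AutoModel.from_pretrained(CKPT)   # message tower\nrep_encoder = AutoModel.from_pretrained(CKPT)   # reply tower, same initialization\n\ndef encode(encoder, texts):\n    batch = tokenizer(texts, padding=True, truncation=True, max_length=96, return_tensors='pt')\n    out = encoder(**batch).last_hidden_state          # [B, T, H]\n    mask = batch['attention_mask'].unsqueeze(-1)      # mean-pool over non-padding tokens\n    return (out * mask).sum(1) / mask.sum(1)\n\nwith torch.no_grad():\n    m = encode(msg_encoder, ['Can we meet tomorrow at 10?'])\n    r = encode(rep_encoder, ['Sure, that works for me.'])\n    print((m @ r.t()).item())",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Core SR Model",

"sec_num": "2"

},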
|
{ |
|
"text": "Joint training of universal encoders has led to enormous progress on standard benchmarks and industrial applications such as (Ranasinghe and Zampieri, 2020; Gencoglu, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 156, |
|
"text": "(Ranasinghe and Zampieri, 2020;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 172, |
|
"text": "Gencoglu, 2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Continual Learning", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "However, privacy policies restrict the data movement across geographic clusters. This prevents the joint training at a single compute cluster. As a result, we train the model sequentially in a continual learning fashion by fine-tuning the model in one region, and then continue training in another.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Continual Learning", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The actual sequence of how this is conducted is important. We observed that keeping English at the last stage provides the best performance. This is likely because English data (which frequently contains bilingual data through code-switching) covers a large proportion in pre-training corpora, thus serving as an anchor in subsequent training stage to maintain the universal properties of the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Continual Learning", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Training the SR model in multiple stages can lead to catastrophic forgetting, where new knowledge easily supplants old knowledge. This problem can be alleviated to some extent by freezing layers of the pre-trained encoders but is still significant after the model is fine-tuned with large corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Several papers have leveraged self-supervised pre-training tasks based on bi-lingual parallel corpora to create or enhance cross-lingual representations (Devlin et al., 2018; Chi et al., 2020) . Following such approaches, we experiment with Translation Language Model (TLM) (Lample and Conneau, 2019) in continual learning to preserve the universal properties of the model. A total of 79M translation pairs from WikiMatrix (Schwenk et al., 2019) and MultiParaCrawl (Aulamo et al., 2020) data including the languages considered in production are extracted as training data. In addition, we conduct an ablation study on auxiliary task selection by comparing with Masked Language Model (MLM) (Devlin et al., 2018) trained on 370M samples from Wikipedia.", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 174, |
|
"text": "(Devlin et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 192, |
|
"text": "Chi et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 300, |
|
"text": "(Lample and Conneau, 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 445, |
|
"text": "(Schwenk et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 486, |
|
"text": "(Aulamo et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 710, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The multi-task training alternates between SR and auxiliary tasks according to a set proportion of mini-batches in an epoch. The proportion controls the trade-offs between the tasks, to achieve the desired levels of performance in the system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning", |
|
"sec_num": "3.2" |
|
}, |
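{

"text": "A sketch of the alternating schedule is given below, assuming a fixed per-epoch task proportion and placeholder sr_step / aux_step functions for the SR matching update and the auxiliary (TLM or MLM) update; the real training loop differs in its data loading and distributed setup.\n\nimport random\n\ndef sr_step(batch):    # placeholder: forward pass + symmetric loss + optimizer step\n    pass\n\ndef aux_step(batch):   # placeholder: forward pass + TLM/MLM loss + optimizer step\n    pass\n\ndef train_epoch(sr_batches, aux_batches, aux_proportion=0.5, seed=0):\n    # Interleave SR and auxiliary mini-batches; aux_proportion trades task-specific quality\n    # against preserving the universal (cross-lingual) properties of the encoder.\n    rng = random.Random(seed)\n    sr_iter, aux_iter = iter(sr_batches), iter(aux_batches)\n    for _ in range(len(sr_batches) + len(aux_batches)):\n        try:\n            if rng.random() < aux_proportion:\n                aux_step(next(aux_iter))\n            else:\n                sr_step(next(sr_iter))\n        except StopIteration:\n            break",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-task Learning",

"sec_num": "3.2"

},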
|
{ |
|
"text": "Native supervised data (m-r pairs) is currently not available for low-resource languages. In such cases, English data is leveraged to generate pseudo m-r pairs using machine-translation (MT). We utilize MT data in continual learning process with auxiliary tasks, or with adapters (Houlsby et al., 2019) by introducing additional parameters in the transformer layers. When training with adapters, we freeze all parameters except the adapters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 302, |
|
"text": "(Houlsby et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "3.3" |
|
}, |
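{

"text": "A sketch of a bottleneck adapter in the style of (Houlsby et al., 2019) and of freezing everything except the adapter parameters is shown below; the 256-dimensional bottleneck and the normal(0, 0.01) initialization follow the settings reported in Section 4, while the host transformer and parameter naming are illustrative assumptions.\n\nimport torch\nimport torch.nn as nn\n\nclass Adapter(nn.Module):\n    # Bottleneck adapter inserted into a transformer layer: down-project, non-linearity,\n    # up-project, and a residual connection.\n    def __init__(self, hidden_size, bottleneck=256):\n        super().__init__()\n        self.down = nn.Linear(hidden_size, bottleneck)\n        self.up = nn.Linear(bottleneck, hidden_size)\n        for lin in (self.down, self.up):\n            nn.init.normal_(lin.weight, mean=0.0, std=0.01)   # larger std tended to diverge\n            nn.init.zeros_(lin.bias)\n\n    def forward(self, x):\n        return x + self.up(torch.relu(self.down(x)))\n\ndef freeze_all_but_adapters(model):\n    # Train only adapter parameters during the low-resource (MT data) stage;\n    # assumes adapter modules are registered under names containing 'adapter'.\n    for name, param in model.named_parameters():\n        param.requires_grad = 'adapter' in name",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Augmentation",

"sec_num": "3.3"

},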
|
{ |
|
"text": "The production system targets 5 high-resource languages (HRL): Spanish (ES), Portuguese (PT), French (FR), German (DE), Italian (IT) with rich native data, and 5 low-resource languages (LRL): Chinese (ZH), Japanese (JA), Dutch (NL), Czech (CS) and Hungarian (HU) without any supervised data. English (EN) serves as pivot language in our experiments. As shown in Table 1 , the data is distributed across Europe (EUR), North America (NAM) and a dedicated cluster storing MT data for LRL. Data movement across these regions is not allowed. Public task-agnostic data for auxiliary tasks in 8 languages is accessible in all regions. We train the model sequentially in 3 stages as shown in Figure 1(b) . First, we jointly train the model in EUR for FR, DE, and IT. Next, we move the model to NAM and continue train with EN, ES, and PT along with auxiliary task. Finally, in LRL, we train the model on machine translated m-r pairs along with original EN data in 2 different ways:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 362, |
|
"end": 369, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 695, |
|
"text": "Figure 1(b)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Universal Model Training Loop", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "(1) jointly train with auxiliary task, or (2) infuse the model with low-resource language adapters. In all stages, we freeze the embedding layer of the encoder during fine-tuning. According to previous studies (Lee et al., 2019; Peters et al., 2019) , freezing partial layers can maintain the model quality while reducing training time during fine-tuning. We observed that freezing embedding layer provides a good balance between micro-batch size per GPU (low if no layers are frozen) and learning capacity of the model (low if many layers are frozen).", |
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 228, |
|
"text": "(Lee et al., 2019;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 249, |
|
"text": "Peters et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Universal Model Training Loop", |
|
"sec_num": "3.4" |
|
}, |
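{

"text": "A schematic of the three-stage continual training loop with the embedding layer frozen in every stage is given below; the region data loaders, the encoder attribute names, and the train_stage internals are placeholders for the actual pipeline.\n\ndef freeze_embeddings(model):\n    # Freeze only the (sub)word embedding layer; deeper layers stay trainable.\n    for param in model.embeddings.parameters():   # attribute name is illustrative\n        param.requires_grad = False\n\ndef train_stage(model, loader, peak_lr):          # placeholder for the per-region SR (+ auxiliary) loop\n    pass\n\ndef continual_training(model, stages):\n    # stages: ordered (region, dataloader, peak_lr) tuples; the model moves between regional\n    # clusters, never the data. English-heavy NAM data follows EUR, and the MT stage comes last.\n    freeze_embeddings(model)\n    for region, loader, peak_lr in stages:\n        train_stage(model, loader, peak_lr)\n    return model\n\n# Illustrative order and peak learning rates from Section 4:\n# continual_training(model, [('EUR', eur_loader, 5e-4), ('NAM', nam_loader, 3e-4), ('LRL-MT', mt_loader, 1e-4)])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Universal Model Training Loop",

"sec_num": "3.4"

},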
|
{ |
|
"text": "For deployment, we create a composite graph with pre-computed response vectors of all languages embedded into the main model. A separate language identifier switches the prediction vectors to the predicted language of the input at run-time. Besides, several auxiliary models are added in online system to decide whether to trigger the universal model according to the characteristics of input message such as length and detected language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Universal Model Graph for Serving", |
|
"sec_num": "3.5" |
|
}, |
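{

"text": "A sketch of the run-time switch in the composite serving graph follows: one shared message encoder, per-language pre-computed response matrices, and a language identifier that selects which response set to score; the language detector, trigger logic, and stored tensors are illustrative placeholders.\n\nimport torch\n\nclass UniversalSRServer:\n    def __init__(self, msg_encoder, responses_by_lang, detect_language, alpha=1.0):\n        self.msg_encoder = msg_encoder\n        # responses_by_lang: lang -> (response_texts, response_matrix [K, D], lm_penalty [K])\n        self.responses_by_lang = responses_by_lang\n        self.detect_language = detect_language\n        self.alpha = alpha\n\n    def suggest(self, message, n2=3):\n        lang = self.detect_language(message)\n        if lang not in self.responses_by_lang:   # simplified trigger logic\n            return []\n        texts, matrix, lm_penalty = self.responses_by_lang[lang]\n        m = self.msg_encoder(message)             # [D] message embedding\n        scores = matrix @ m + self.alpha * lm_penalty\n        top = torch.topk(scores, k=min(n2, scores.numel())).indices.tolist()\n        return [texts[i] for i in top]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Universal Model Graph for Serving",

"sec_num": "3.5"

},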
|
{ |
|
"text": "The training data is collected and processed without any eyes access from commercial users in Outlook email system. To be more specific, we filter 50M m-r pairs from one-to-one conversations for each high-resource language, and translate 20M m-r pairs for each low-resource language. Considering the m-r length distribution, we truncate m-r pairs to (96, 64) tokens as training data, and filter out messages longer than 96 tokens during inference, so that the model is more focused on providing quick responses to short messages. The response set size for each language is 20K, filtered or trans-created from English native data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In all three stages of training, we use an effective batch size of 16K. We utilize the Adam optimizer (Kingma and Ba, 2014) with weight decay and set peak learning rates as [5e-4, 3e-4, 1e-4] for three stages respectively. We train up to 30 epochs from which the best model is selected based on validation set loss over all languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For MLM/TLM objectives, we use single-token masking, the task proportion is set as 0.5. The final loss of the model is sum of symmetric loss and auxiliary task loss. For adapters, we use the hidden dimension of 256 in the bottleneck architecture and initialize these parameters with a normal distribution of mean 0 and standard deviation 0.01. According to our observation, high standard deviation for initialization can cause divergence. All experiments are conducted with 16 Nvidia V100-32GB GPU cards.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "During prediction, we pick top N 1 = 30 responses according to equation 2, and then cluster the ranked results and down-select N 2 = 3 responses as final prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "4" |
|
}, |
|
{

"text": "We compute evaluation metrics based on two kinds of evaluation sets. The first test set samples m-r pairs where the reply is contained in the response set (GoldenMR); it is used for computing the ranking metric, Mean Reciprocal Rank, MRR = \\frac{1}{N} \\sum_{i=1}^{N} \\frac{1}{\\mathrm{Rank}_i}, over the top 15 predictions. The second set consists of general m-r pairs (GenMR) where the reply is not restricted to the response set. A weighted-ROUGE metric is computed on the final 3 responses against the reference response over uni/bi/tri-grams, W_ROUGE = \\sum_{i=1}^{3} w_i \\mathrm{ROUGE}_i(\\mathrm{Ref}, \\mathrm{Rep}_k), with weights in 1:2:3 proportions.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Offline Evaluation Metrics and Sets",

"sec_num": "4.1"

},
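{

"text": "A small reference implementation of the two offline metrics under the definitions above is given below; the ROUGE here is a plain n-gram recall, the 1:2:3 weights are normalized, and taking the maximum over the 3 shown replies is an assumption that may differ slightly from the internal evaluation code.\n\ndef mrr(ranks, cutoff=15):\n    # ranks: 1-based rank of the golden reply per test message, or None if not retrieved.\n    recip = [1.0 / r for r in ranks if r is not None and r <= cutoff]\n    return sum(recip) / max(len(ranks), 1)\n\ndef ngrams(tokens, n):\n    return [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]\n\ndef rouge_n_recall(reference, candidate, n):\n    ref, cand = ngrams(reference.split(), n), ngrams(candidate.split(), n)\n    return sum(1 for g in ref if g in cand) / len(ref) if ref else 0.0\n\ndef weighted_rouge(reference, candidates, weights=(1.0, 2.0, 3.0)):\n    # Uni/bi/tri-gram ROUGE against the reference, weighted 1:2:3 over n.\n    def score(cand):\n        return sum(w * rouge_n_recall(reference, cand, n) for n, w in enumerate(weights, start=1)) / sum(weights)\n    return max(score(c) for c in candidates) if candidates else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Offline Evaluation Metrics and Sets",

"sec_num": "4.1"

},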
|
{ |
|
"text": "We use \u223c50K GoldenMR and 500K GenMR dataset for each language. For languages without native data, an evaluation proxy with MT data is used for model selection before online deployment. We give a higher preference to ROUGE as it showed higher correlation to our online metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Offline Evaluation Metrics and Sets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the deployed models in production, we measure the following online metrics on real user traffic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Online Evaluation Metrics", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Click-through rate (CTR): the ratio of the count of replied emails with SR clicks over all emails that the feature is rendered.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Online Evaluation Metrics", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Usage: the ratio of count of replied emails with SR clicks to all replied emails. This captures the contribution of SR to all Email replies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Online Evaluation Metrics", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Char-saved: the average number of characterssaved by clicking the selected reply.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Online Evaluation Metrics", |
|
"sec_num": "4.2" |
|
}, |
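{

"text": "The three online metrics reduce to simple ratios over logged events; a toy computation, assuming an event log with one record per rendered email and illustrative field names, is shown below.\n\ndef online_metrics(events):\n    # events: one dict per email with boolean 'rendered', 'replied', 'sr_clicked' and int 'chars_saved'.\n    rendered = sum(1 for e in events if e['rendered'])\n    replied = sum(1 for e in events if e['replied'])\n    sr_replies = [e for e in events if e['replied'] and e['sr_clicked']]\n    ctr = len(sr_replies) / max(rendered, 1)      # SR-clicked replies over all rendered emails\n    usage = len(sr_replies) / max(replied, 1)     # SR-clicked replies over all replies\n    char_saved = sum(e['chars_saved'] for e in sr_replies) / max(len(sr_replies), 1)\n    return {'CTR': ctr, 'Usage': usage, 'Char-saved': char_saved}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Online Evaluation Metrics",

"sec_num": "4.2"

},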
|
{ |
|
"text": "The model is evaluated on the international markets we are expanding to. English is excluded as EN model is well established. Results on baseline (existing per-language production models) and universal models for high-resource markets are reported in Table 2 . Results targeting new markets without any native data are reported in Table 3 . Entries in the tables are defined as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 251, |
|
"end": 258, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 338, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "BiLSTM: Per-language (mono-lingual) production models for non-EN markets as the baseline and also the control setting of online A/B tests. Here the encoders have shared embedding size of 320 and 2 BiLSTM layers with hidden size of 300.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "UniPLM-[NAM/EUR]: Universal model created by fine-tuning pre-trained multi-lingual encoders for EUR and NAM regions respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "UniPLM-HRL: The model across the first 2 stages with the universal training loop in Figure 1(b) . In the second stage, the model is fine-tuned along with TLM auxiliary task with multi-lingual unsupervised data. This is the first universal model candidate that breaks down the data boundary across High-Resource Languages (HRL).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 96, |
|
"text": "Figure 1(b)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Lang For new languages without native data, we continue to train the base universal model (UniPLM-HRL) with MT data with two approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "UniPLM-All-CL: The UniPLM-HRL model exported to LRL region trained with MT data (and native EN data) with SR and TLM multi-task objectives.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "UniPLM-All-ADP: The model trained with MTadapter, with all parameters frozen except for adapters parameters. data augmentation approach involving MT data, with multi-task learning or adapters. Per-language vs. Universal Model: The BiL-STM production models serve as strong baselines and have comparable MRR for UniPLM-NAM in ES and PT (Table 2) . UniPLM-EUR has better performance than the BiLSTM production models. Overall, the Uni-PLM models have comparable or better performance than the monolingual baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 335, |
|
"end": 344, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reg", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "UniPLM-NAM/EUR vs. UniPLM-HRL: Table 2 also shows no appreciable difference in ROUGE metrics when training the model in 2 stages. In addition, the model outperforms BiL-STM per-language models on MRR on ES, DE, FR, and IT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Quality Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The above two comparisons show that for highresource languages, we do not suffer significant degradation in quality with single stage and twostage universal models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Quality Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Performance on LRL: Table 3 compares the UniPLM-All-CL and UniPLM-All-ADP with UniPLM-HRL model on low-resource languages. While UniPLM-HRL shows poor ranking performance, UniPLM-All-CL significantly improves on all metrics for LRL, while preserving the ROUGE performance on the other 5 languages. With adapters, UniPLM-All-ADP outperforms other models on all metrics in low-resource languages while keeping the performance unchanged (as a result of freezing the UniPLM-HRL model) in both EUR and NAM.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 27, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Quality Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Overall, the results demonstrate the effectiveness of MT data augmentation in low-resource languages. We observe slight performance degradation on EUR and NAM languages caused by continual training on MT data. This may be due to imperfect translation. However we can mitigate these losses with MT-adapters which are quite promising as they increase the parameters by just 4.3% and even improves training efficiency as we can freeze all other parameters during fine tuning. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Quality Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "MLM and TLM auxiliary tasks: Table 4 investigates contributions of auxiliary tasks in UniPLM-HRL model. We remove TLM objective as -TLM which represents continue training only on SR task, and replace TLM with MLM objective as -TLM+MLM which represents joint training with SR and MLM tasks. UniPLM-HRL with TLM task shows improvements over MLM task and also outperforms single SR task for W_ROUGE for all languages except DE. We hypothesize that TLM uses bi-lingual corpora which helps align representations for semantically similar text from different languages in task-specific fine-tuning. Furthermore, TLM objective can be interpreted as maximizing mutual information between cross-lingual contexts implicitly (Chi et al., 2020) . It demonstrates that such inductive biases in auxiliary tasks are important for cross-lingual transfer in universal models. Replay in continual learning: We continue to train the UniPLM-HRL model by rehearsing the old data in EUR as +EUR. In Table 5 , +EUR we see severe regression on NAM languages, despite the improvement on EUR languages. The replay concept in continual learning (McClelland, 1998) fails here due to the two reasons. First, forgetting is the quintessential mode of continual learning. Second, EUR iteration doesn't contain the pivot language English training data. Continual learning requires delicately maintaining the universal properties through knowledge anchors which is difficult to achieve in practice.", |
|
"cite_spans": [ |
|
{ |
|
"start": 713, |
|
"end": 731, |
|
"text": "(Chi et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1117, |
|
"end": 1135, |
|
"text": "(McClelland, 1998)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 36, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 976, |
|
"end": 983, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Studies", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Based on the offline metrics, we selected UniPLM-HRL as the first candidate for online tests in our production system. Using BiLSTM per-language model as the control, we conducted a 2-week A/B test with 5% user traffic for each model per language/region. Table 6 presents the results for different languages. We observe statistically significant gain in ES (CTR) and FR (Char-saved). While there are regressions in other languages, they are not statistically significant (p > 0.5) Overall, the universal model is generally better or at par compared to their mono-lingual baselines. This has allowed us to deploy the universal model to 100% of users in the 5 languages. An extended universal model supporting low-resource languages is getting deployed during the writing of this paper. Compared with per-language separate model building, the effort of model training, inference stack and deployment can be substantially reduced, though the process of training data and response collection, and human evaluation for all our targeted languages are still required. Overall, around 65% training and performance improvement time cost can be saved with one single universal model target at 5 languages. We expect even higher amortized serving costs reductions as the approach is scaled to more languages.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 262, |
|
"text": "Table 6", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Online Results", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "This paper presents our approach of scaling automated suggested replies with one universal model. Faced with compute resource and data privacy constraints, we propose a multi-task continual learning framework with auxiliary tasks, and data augmentation with adapter-based model architecture. The universal model in production saves significant compute resources and model management overhead, while allowing us to train across regional data boundaries. In addition, the process allows us to cold-start in new markets even when no supervised data exists. Based on the promising offline and online results, we have deployed the model in several languages and plan to extend the process for 20 languages around the world.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Opustools and parallel corpus diagnostics", |
|
"authors": [ |
|
{ |
|
"first": "Mikko", |
|
"middle": [], |
|
"last": "Aulamo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Umut", |
|
"middle": [], |
|
"last": "Sulubacak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sami", |
|
"middle": [], |
|
"last": "Virpioja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3782--3789", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikko Aulamo, Umut Sulubacak, Sami Virpioja, and J\u00f6rg Tiedemann. 2020. Opustools and parallel cor- pus diagnostics. In Proceedings of The 12th Lan- guage Resources and Evaluation Conference, pages 3782-3789.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Infoxlm: An information-theoretic framework for cross-lingual language model pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Zewen", |
|
"middle": [], |
|
"last": "Chi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saksham", |
|
"middle": [], |
|
"last": "Singhal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhui", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xia", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian-Ling", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2007.07834" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zewen Chi, Li Dong, Furu Wei, Nan Yang, Sak- sham Singhal, Wenhui Wang, Xia Song, Xian- Ling Mao, Heyan Huang, and Ming Zhou. 2020. Infoxlm: An information-theoretic framework for cross-lingual language model pre-training. arXiv preprint arXiv:2007.07834.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Unsupervised cross-lingual representation learning at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikay", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.02116" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Xnli: Evaluating crosslingual sentence representations", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruty", |
|
"middle": [], |
|
"last": "Rinott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1809.05053" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Guillaume Lample, Ruty Rinott, Ad- ina Williams, Samuel R Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. Xnli: Evaluating cross- lingual sentence representations. arXiv preprint arXiv:1809.05053.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Diversifying reply suggestions using a matchingconditional variational autoencoder", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Budhaditya Deb", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bailey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Shokouhi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Budhaditya Deb, P. Bailey, and M. Shokouhi. 2019. Diversifying reply suggestions using a matching- conditional variational autoencoder. In NAACL- HLT.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Designing for health chatbots", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Fadhil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gianluca", |
|
"middle": [], |
|
"last": "Schiavo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1902.09022" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ahmed Fadhil and Gianluca Schiavo. 2019. De- signing for health chatbots. arXiv preprint arXiv:1902.09022.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Catastrophic forgetting in connectionist networks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Robert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "French", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Trends in cognitive sciences", |
|
"volume": "3", |
|
"issue": "4", |
|
"pages": "128--135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert M French. 1999. Catastrophic forgetting in con- nectionist networks. Trends in cognitive sciences, 3(4):128-135.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Large-scale, languageagnostic discourse classification of tweets during covid-19", |
|
"authors": [ |
|
{ |
|
"first": "Oguzhan", |
|
"middle": [], |
|
"last": "Gencoglu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Machine Learning and Knowledge Extraction", |
|
"volume": "2", |
|
"issue": "4", |
|
"pages": "603--616", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oguzhan Gencoglu. 2020. Large-scale, language- agnostic discourse classification of tweets during covid-19. Machine Learning and Knowledge Ex- traction, 2(4):603-616.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Milad Shokouhi, and Sergey Yekhanin. 2020. Differentially private set union", |
|
"authors": [ |
|
{ |
|
"first": "Sivakanth", |
|
"middle": [], |
|
"last": "Gopi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pankaj", |
|
"middle": [], |
|
"last": "Gulhane", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janardhan", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judy", |
|
"middle": [ |
|
"Hanwen" |
|
], |
|
"last": "Shen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.09745" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sivakanth Gopi, Pankaj Gulhane, Janardhan Kulkarni, Judy Hanwen Shen, Milad Shokouhi, and Sergey Yekhanin. 2020. Differentially private set union. arXiv preprint arXiv:2002.09745.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Sanjiv Kumar, Balint Miklos, and Ray Kurzweil. 2017a. Efficient Natural Language Response Suggestion for Smart Reply", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rami", |
|
"middle": [], |
|
"last": "Al-Rfou", |
|
"suffix": "" |
|
}, |
|
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Strope", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Hsuan", |
|
"middle": [], |
|
"last": "Sung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e1szl\u00f3", |
|
"middle": [], |
|
"last": "Luk\u00e1cs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruiqi", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Henderson, Rami Al-Rfou', Brian Strope, Yun-Hsuan Sung, L\u00e1szl\u00f3 Luk\u00e1cs, Ruiqi Guo, San- jiv Kumar, Balint Miklos, and Ray Kurzweil. 2017a. Efficient Natural Language Response Suggestion for Smart Reply. CoRR, abs/1705.00652.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Balint Miklos, and Ray Kurzweil. 2017b. Efficient natural language response suggestion for smart reply", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rami", |
|
"middle": [], |
|
"last": "Al-Rfou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Strope", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Hsuan", |
|
"middle": [], |
|
"last": "Sung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e1szl\u00f3", |
|
"middle": [], |
|
"last": "Luk\u00e1cs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruiqi", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1705.00652" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Henderson, Rami Al-Rfou, Brian Strope, Yun- Hsuan Sung, L\u00e1szl\u00f3 Luk\u00e1cs, Ruiqi Guo, Sanjiv Ku- mar, Balint Miklos, and Ray Kurzweil. 2017b. Effi- cient natural language response suggestion for smart reply. arXiv preprint arXiv:1705.00652.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Convert: Efficient and accurate conversational representations from transformers", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inigo", |
|
"middle": [], |
|
"last": "Casanueva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Mrk\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pei-Hao", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Hsien", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.03688" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Henderson, Inigo Casanueva, Nikola Mrk\u0161i\u0107, Pei-Hao Su, Tsung-Hsien Wen, and Ivan Vuli\u0107. 2019a. Convert: Efficient and accurate conver- sational representations from transformers. arXiv preprint arXiv:1911.03688.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Training neural response selection for task-oriented dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli'c", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniela", |
|
"middle": [], |
|
"last": "Gerz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I\u00f1igo", |
|
"middle": [], |
|
"last": "Casanueva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawe\u0142", |
|
"middle": [], |
|
"last": "Budzianowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Coope", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgios", |
|
"middle": [], |
|
"last": "Spithourakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Hsien", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Mrksi'c", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pei-Hao", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Henderson, Ivan Vuli'c, Daniela Gerz, I\u00f1igo Casanueva, Pawe\u0142 Budzianowski, Sam Coope, Georgios Spithourakis, Tsung-Hsien Wen, Nikola Mrksi'c, and Pei-Hao Su. 2019b. Training neural re- sponse selection for task-oriented dialogue systems. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Parameter-efficient transfer learning for NLP", |
|
"authors": [ |
|
{ |
|
"first": "Neil", |
|
"middle": [], |
|
"last": "Houlsby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrei", |
|
"middle": [], |
|
"last": "Giurgiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stanislaw", |
|
"middle": [], |
|
"last": "Jastrzebski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bruna", |
|
"middle": [], |
|
"last": "Morrone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "De Laroussilhe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Gesmundo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Attariyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gelly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 36th International Conference on Machine Learning", |
|
"volume": "97", |
|
"issue": "", |
|
"pages": "2790--2799", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 2790-2799. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Smart Reply: Automated Response Suggestion for Email", |
|
"authors": [ |
|
{ |
|
"first": "Anjuli", |
|
"middle": [], |
|
"last": "Kannan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karol", |
|
"middle": [], |
|
"last": "Kurach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujith", |
|
"middle": [], |
|
"last": "Ravi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tobias", |
|
"middle": [], |
|
"last": "Kaufmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Tomkins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balint", |
|
"middle": [], |
|
"last": "Miklos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e1szl\u00f3", |
|
"middle": [], |
|
"last": "Luk\u00e1cs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Ganea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Ramavajjala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "KDD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufmann, Andrew Tomkins, Balint Miklos, Gre- gory S. Corrado, L\u00e1szl\u00f3 Luk\u00e1cs, Marina Ganea, Pe- ter Young, and Vivek Ramavajjala. 2016. Smart Re- ply: Automated Response Suggestion for Email. In KDD.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Conversational assistants for elderly users-the importance of socially cooperative dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Kopp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mara", |
|
"middle": [], |
|
"last": "Brandt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Buschmeier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katharina", |
|
"middle": [], |
|
"last": "Cyra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Farina", |
|
"middle": [], |
|
"last": "Freigang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Kr\u00e4mer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franz", |
|
"middle": [], |
|
"last": "Kummert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Opfermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karola", |
|
"middle": [], |
|
"last": "Pitsch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lars", |
|
"middle": [], |
|
"last": "Schillingmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAMAS Workshop on Intelligent Conversation Agents in Home and Geriatric Care Applications co-located with the Federated AI Meeting", |
|
"volume": "2338", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Kopp, Mara Brandt, Hendrik Buschmeier, Katharina Cyra, Farina Freigang, Nicole Kr\u00e4mer, Franz Kummert, Christiane Opfermann, Karola Pitsch, Lars Schillingmann, et al. 2018. Conver- sational assistants for elderly users-the importance of socially cooperative dialogue. In Proceedings of the AAMAS Workshop on Intelligent Conversation Agents in Home and Geriatric Care Applications co-located with the Federated AI Meeting, volume 2338.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.07291" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining. arXiv preprint arXiv:1901.07291.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "What would elsa do? freezing layers during transformer fine-tuning", |
|
"authors": [ |
|
{ |
|
"first": "Jaejun", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raphael", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.03090" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jaejun Lee, Raphael Tang, and Jimmy Lin. 2019. What would elsa do? freezing layers during transformer fine-tuning. arXiv preprint arXiv:1911.03090.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Complementary learning systems in the brain. a connectionist approach to explicit and implicit cognition and memory", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "McClelland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Annals of the New York Academy of Sciences", |
|
"volume": "843", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James L McClelland. 1998. Complementary learning systems in the brain. a connectionist approach to ex- plicit and implicit cognition and memory. Annals of the New York Academy of Sciences, 843:153.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Catastrophic interference in connectionist networks: The sequential learning problem", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "McCloskey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neal", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "Psychology of learning and motivation", |
|
"volume": "24", |
|
"issue": "", |
|
"pages": "109--165", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael McCloskey and Neal J Cohen. 1989. Catas- trophic interference in connectionist networks: The sequential learning problem. In Psychology of learn- ing and motivation, volume 24, pages 109-165. El- sevier.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Ai-based chatbot service for financial industry", |
|
"authors": [ |
|
{ |
|
"first": "Takuma", |
|
"middle": [], |
|
"last": "Okuda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanae", |
|
"middle": [], |
|
"last": "Shoda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Fujitsu Scientific and Technical Journal", |
|
"volume": "54", |
|
"issue": "2", |
|
"pages": "4--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takuma Okuda and Sanae Shoda. 2018. Ai-based chat- bot service for financial industry. Fujitsu Scientific and Technical Journal, 54(2):4-8.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "To tune or not to tune? adapting pretrained representations to diverse tasks", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1903.05987" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E Peters, Sebastian Ruder, and Noah A Smith. 2019. To tune or not to tune? adapting pretrained representations to diverse tasks. arXiv preprint arXiv:1903.05987.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Multilingual offensive language identification with cross-lingual embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tharindu", |
|
"middle": [], |
|
"last": "Ranasinghe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.05324" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tharindu Ranasinghe and Marcos Zampieri. 2020. Multilingual offensive language identification with cross-lingual embeddings. arXiv preprint arXiv:2010.05324.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuo", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyu", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.05791" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzm\u00e1n. 2019. Wiki- matrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia. arXiv preprint arXiv:1907.05791.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Neural responding machine for short-text conversation", |
|
"authors": [ |
|
{ |
|
"first": "Lifeng", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1503.02364" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lifeng Shang, Zhengdong Lu, and Hang Li. 2015. Neu- ral responding machine for short-text conversation. arXiv preprint arXiv:1503.02364.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A new chatbot for customer service on social media", |
|
"authors": [ |
|
{ |
|
"first": "Anbang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yufan", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vibha", |
|
"middle": [], |
|
"last": "Sinha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rama", |
|
"middle": [], |
|
"last": "Akkiraju", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3506--3510", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anbang Xu, Zhe Liu, Yufan Guo, Vibha Sinha, and Rama Akkiraju. 2017. A new chatbot for customer service on social media. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems, pages 3506-3510.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russ", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5753-5763.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Occ: A smart reply system for efficient in-app communications", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huaixiu", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franziska", |
|
"middle": [], |
|
"last": "Bell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.08167" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franziska Bell Gokhan Tur Yue Weng, Huaixiu Zheng. 2019. Occ: A smart reply system for efficient in-app communications. arXiv preprint arXiv:1907.08167.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "The design and implementation of xiaoice, an empathetic social chatbot", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heung-Yeung", |
|
"middle": [], |
|
"last": "Shum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Computational Linguistics", |
|
"volume": "46", |
|
"issue": "1", |
|
"pages": "53--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Zhou, Jianfeng Gao, Di Li, and Heung-Yeung Shum. 2020. The design and implementation of xiaoice, an empathetic social chatbot. Computational Linguis- tics, 46(1):53-93.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "(a) Matching model architecture with symmetric loss and TLM/MLM cross-entropy loss. (b) Multi-task continual training loop for EUR->NAM->LRL clusters." |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Regional distribution of training data for different languages.", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Evaluation on HRL (EUR and NAM) with UniPLM-HRL via continual multi-task learning and production baselines. The best results are in bold.", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Reg</td><td colspan=\"2\">Lang Model</td><td colspan=\"2\">MRR W_ROUGE</td></tr><tr><td colspan=\"2\">EUR DE</td><td>UniPLM-HRL</td><td>0.3323</td><td>0.0663</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.3103</td><td>0.0686</td></tr><tr><td/><td>FR</td><td>UniPLM-HRL</td><td>0.4135</td><td>0.0624</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.4207</td><td>0.0659</td></tr><tr><td/><td>IT</td><td>UniPLM-HRL</td><td>0.4186</td><td>0.0360</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.4274</td><td>0.0374</td></tr><tr><td colspan=\"2\">NAM ES</td><td>UniPLM-HRL</td><td>0.3319</td><td>0.0552</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.3160</td><td>0.0551</td></tr><tr><td/><td>PT</td><td>UniPLM-HRL</td><td>0.4154</td><td>0.0563</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.3783</td><td>0.0561</td></tr><tr><td>LRL</td><td>ZH</td><td>UniPLM-HRL</td><td>0.1365</td><td>0.0740</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.2638</td><td>0.0869</td></tr><tr><td/><td/><td colspan=\"2\">UniPLM-All-ADP 0.3024</td><td>0.0901</td></tr><tr><td/><td>JA</td><td>UniPLM-HRL</td><td>0.1475</td><td>0.1010</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.3281</td><td>0.1106</td></tr><tr><td/><td/><td colspan=\"2\">UniPLM-All-ADP 0.3719</td><td>0.1180</td></tr><tr><td/><td>NL</td><td>UniPLM-HRL</td><td>0.0638</td><td>0.0371</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.1822</td><td>0.0436</td></tr><tr><td/><td/><td colspan=\"2\">UniPLM-All-ADP 0.2490</td><td>0.0480</td></tr><tr><td/><td>CS</td><td>UniPLM-HRL</td><td>0.0366</td><td>0.0386</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.1312</td><td>0.0441</td></tr><tr><td/><td/><td colspan=\"2\">UniPLM-All-ADP 0.2612</td><td>0.0526</td></tr><tr><td/><td>HU</td><td>UniPLM-HRL</td><td>0.0420</td><td>0.0356</td></tr><tr><td/><td/><td>UniPLM-All-CL</td><td>0.0779</td><td>0.0776</td></tr><tr><td/><td/><td colspan=\"2\">UniPLM-All-ADP 0.2615</td><td>0.0907</td></tr></table>", |
|
"text": "compares the universal model UniPLM-HRL with both per-language baselines and perregion models.Table 3shows the results with the low-resource languages, which are trained with", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Reg</td><td colspan=\"2\">Lang Model</td><td>MRR W_ROUGE</td></tr><tr><td colspan=\"2\">EUR DE</td><td colspan=\"2\">UniPLM-HRL 0.3323</td><td>0.0663</td></tr><tr><td/><td/><td/><td>+EUR 0.4272</td><td>0.0708</td></tr><tr><td/><td>FR</td><td colspan=\"2\">UniPLM-HRL 0.4135</td><td>0.0624</td></tr><tr><td/><td/><td/><td>+EUR 0.4818</td><td>0.0660</td></tr><tr><td/><td>IT</td><td colspan=\"2\">UniPLM-HRL 0.4186</td><td>0.0360</td></tr><tr><td/><td/><td/><td>+EUR 0.4851</td><td>0.0388</td></tr><tr><td colspan=\"2\">NAM ES</td><td colspan=\"2\">UniPLM-HRL 0.3319</td><td>0.0552</td></tr><tr><td/><td/><td/><td>+EUR 0.2125</td><td>0.0456</td></tr><tr><td/><td>PT</td><td colspan=\"2\">UniPLM-HRL 0.4154</td><td>0.0563</td></tr><tr><td/><td/><td/><td>+EUR 0.3298</td><td>0.0505</td></tr></table>", |
|
"text": "Results with variations on UniPLM-HRL. -TLM denotes removing TLM and -TLM+MLM denotes replacing with MLM in continual learning.", |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>: Results with 2-stage and replay-based contin-</td></tr><tr><td>ual learning. +EUR denotes replaying UniPLM-HRL</td></tr><tr><td>with EUR m-r pairs.</td></tr></table>", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF10": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Online metrics for UniPLM-HRL model. The control model is BiLSTM in each language. The numbers with p-val < 0.05 are in bold.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |