|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:55:47.775825Z" |
|
}, |
|
"title": "XPersona: Evaluating Multilingual Personalized Chatbot", |
|
"authors": [ |
|
{ |
|
"first": "Zhaojiang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Genta", |
|
"middle": [], |
|
"last": "Indra Winata", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Cahyawijaya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Madotto", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Bang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Etsuko", |
|
"middle": [], |
|
"last": "Ishii", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hong Kong University of Science and Technology", |
|
"location": { |
|
"addrLine": "Clear Water Bay", |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Personalized dialogue systems are an essential step toward better human-machine interaction. Existing personalized dialogue agents rely on properly designed conversational datasets, which are mostly monolingual (e.g., English), which greatly limits the usage of conversational agents in other languages. In this paper, we propose a multilingual extension of Persona-Chat (Zhang et al., 2018), namely XPersona. Our dataset includes persona conversations in six different languages other than English for evaluating multilingual personalized agents. We experiment with both multilingual and cross-lingual trained baselines, and evaluate them against monolingual and translation-pipeline models using both automatic and human evaluation. Experimental results show that the multilingual trained models outperform the translation-pipeline and that they are on par with the monolingual models, with the advantage of having a single model across multiple languages. On the other hand, the state-of-the-art cross-lingual trained models achieve inferior performance to the other models, showing that cross-lingual conversation modeling is a challenging task. We hope that our dataset and baselines 1 will accelerate research in multilingual dialogue systems.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Personalized dialogue systems are an essential step toward better human-machine interaction. Existing personalized dialogue agents rely on properly designed conversational datasets, which are mostly monolingual (e.g., English), which greatly limits the usage of conversational agents in other languages. In this paper, we propose a multilingual extension of Persona-Chat (Zhang et al., 2018), namely XPersona. Our dataset includes persona conversations in six different languages other than English for evaluating multilingual personalized agents. We experiment with both multilingual and cross-lingual trained baselines, and evaluate them against monolingual and translation-pipeline models using both automatic and human evaluation. Experimental results show that the multilingual trained models outperform the translation-pipeline and that they are on par with the monolingual models, with the advantage of having a single model across multiple languages. On the other hand, the state-of-the-art cross-lingual trained models achieve inferior performance to the other models, showing that cross-lingual conversation modeling is a challenging task. We hope that our dataset and baselines 1 will accelerate research in multilingual dialogue systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Personalized dialogue agents have been shown efficient in conducting human-like conversation. This progress has been catalyzed thanks to existing conversational dataset such as Persona-chat (Zhang et al., 2018; Dinan et al., 2019a) . However, the training data are provided in a single language (e.g., English), and thus the resulting systems can perform conversations only in the training language. Commercial dialogue systems are required to handle a large number of languages since the smart home devices market is increasingly international (Etherington, 2019) . Therefore, creating multilingual conversational benchmarks is essential, yet challenging since it is costly to perform human annotation of data in all languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 210, |
|
"text": "(Zhang et al., 2018;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 231, |
|
"text": "Dinan et al., 2019a)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 564, |
|
"text": "(Etherington, 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A possible solution is to use translation systems before and after the model inference. This comes with three major problems: 1) amplification of translation errors since the current dialogue systems are far from perfect, especially with noisy input; 2) the three-stage pipeline system is significantly slower in terms of inference speed; and 3) high translation costs since the current state-of-theart models, especially in low resources languages, are only available using costly APIs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we analyze two possible workarounds to alleviate the aforementioned challenges. The first is to build a cross-lingual transferable system by aligning cross-lingual representations, as in Conneau et al. (2018) , in which the system is trained on one language and zero-shot to another language. The second is to learn a multilingual system directly from noisy multilingual data (e.g., translated data), thus getting rid of the translation system dependence at inference time.", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "Conneau et al. (2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To evaluate the aforementioned solutions, we propose a dataset called Multilingual Persona-Chat, or XPersona, by extending the Persona-Chat corpora (Dinan et al., 2019a) to six languages: Chinese, French, Indonesian, Italian, Korean, and Japanese. In XPersona, the training sets are automatically translated using translation APIs with several human-in-the-loop passes of mistake correction. In contrast, the validation and test sets are annotated by humans to facilitate both automatic and human evaluations in multiple languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 169, |
|
"text": "(Dinan et al., 2019a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Furthermore, we propose competitive baselines in two training settings, namely, cross-lingual and multilingual, and compare them with translation pipeline models. Our baselines leverage pre-Persona I helped design the game starcraft. I am a famous twitch streamer..", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Sys: do you know the game starcraft ? i helped designing it ! Usr: that s awesome , i m sure my ex boyfriend would like that game Sys: i usually stream the game on my twitch channel that s great , i also love twitch as well It Sys: conosci il gioco starcraft? ho aiutato a progettarlo! Usr: \u00e8 fantastico, sono sicuro che al mio ex ragazzo gli piacerebbe Sys: di solito faccio streaming mentre gioco sul mio canale Twitch Fr Sys: connaissez-vous le jeu starcraft? j'ai aid\u00e9 \u00e0 le concevoir! Usr: c'est g\u00e9nial, je suis s\u00fbr que mon ex petit ami aimerait ce jeu Sys: Je diffuse g\u00e9n\u00e9ralement le jeu sur ma cha\u00eene Twitch Id Sys: apakah anda tahu game starcraft? saya yang mendesainnya! Usr: itu luar biasa, saya yakin mantan pacar saya suka game itu. Sys: saya biasanya memainkan game itu di channel twitch saya.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "En", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Sys: \u4f60\u77e5\u9053\u6e38\u620f\u300a\u661f\u9645\u4e89\u9738\u300b\u5417\uff1f\u6211\u5e2e\u5fd9\u8bbe\u8ba1\u4e86\u5b83\uff01 Usr:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zh", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u597d\u5389\u5bb3\uff0c\u6211\u89c9\u5f97\u6211\u7684\u524d\u7537\u53cb\u4f1a\u559c\u6b22\u90a3\u4e2a\u6e38\u620f Sys: \u6211\u7ecf\u5e38\u5728\u6211\u7684\u76f4\u64ad\u9891\u9053\u4e0a\u76f4\u64ad\u6e38\u620f Ko Sys: \u1102 \u1165 \u1100 \u1166\u110b \u1175 \u11b7 \u1109 \u1173\u1110 \u1161\u110f \u1173\u1105 \u1162\u1111 \u1173\u1110 \u1173\u1105 \u1173 \u11af \u110b \u1161\u1102 \u1175? \u1102 \u1161\u1102 \u1173 \u11ab \u1100 \u1173\u1100 \u1165 \u11ba\u110b \u1173 \u11af \u1103 \u1175\u110c \u1161\u110b \u1175 \u11ab\u1112 \u1161\u1102 \u1173 \u11ab \u1100 \u1165 \u11ba\u110b \u1173 \u11af \u1103 \u1169\u110b \u116a \u11bb\u110b \u1165! Usr: \u1106 \u1165 \u11ba\u110c \u1175 \u11ab\u1103 \u1166, \u1102 \u1162 \u110c \u1165 \u11ab \u1102 \u1161 \u11b7\u110c \u1161\u110e \u1175 \u11ab\u1100 \u116e\u1100 \u1161 \u1100 \u1173 \u1100 \u1166\u110b \u1175 \u11b7\u110b \u1173 \u11af \u110c \u1169 \u11c2\u110b \u1161\u1112 \u1161 \u11af \u1100 \u1165\u1105 \u1161\u1100 \u1169 \u1112 \u116a \u11a8\u1109 \u1175 \u11ab\u1112 \u1162. Sys: \u1102 \u1161\u1102 \u1173 \u11ab \u1107 \u1169\u1110 \u1169 \u11bc \u1102 \u1162 \u1110 \u1173\u110b \u1171\u110e \u1175 \u110e \u1162\u1102 \u1165 \u11af\u1105 \u1169 \u1100 \u1173 \u1100 \u1166\u110b \u1175 \u11b7\u110b \u1173 \u11af \u1109 \u1173\u1110 \u1173\u1105 \u1175\u1106 \u1175 \u11bc\u1112 \u1162.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zh", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Sys: \u30b2\u30fc\u30e0\u306e\u30b9\u30bf\u30fc\u30af\u30e9\u30d5\u30c8\u3092\u77e5\u3063\u3066\u3044\u307e\u3059\u304b\uff1f\u79c1\u306f\u305d\u308c\u3092\u8a2d\u8a08\u3059\u308b\u306e\u3092\u52a9\u3051\u307e\u3057\u305f\uff01 Usr: \u305d\u308c\u306f\u3059\u3054\u3044\u3067\u3059\u3001\u79c1\u306f\u79c1\u306e\u5143\u5f7c\u304c\u305d\u306e\u30b2\u30fc\u30e0\u3092\u597d\u304d\u306b\u306a\u308b\u3068\u78ba\u4fe1\u3057\u3066\u3044\u307e\u3059 Sys: \u79c1\u306f\u901a\u5e38\u3001twitch\u30c1\u30e3\u30f3\u30cd\u30eb\u3067\u30b2\u30fc\u30e0\u3092\u30b9\u30c8\u30ea\u30fc\u30df\u30f3\u30b0\u3057\u307e\u3059 Table 1 : Multi-turn annotated dialogue samples from test set in seven languages. For simplicity, we only show three turns for each dialogue and the persona in English.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 128, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Jp", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "trained cross-lingual (Chi et al., 2019) and multilingual (Devlin et al., 2018) models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 40, |
|
"text": "(Chi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 58, |
|
"end": 79, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Jp", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "An extensive automatic and human evaluation of our models shows that a multilingual system is able to outperform strong translation-based models and on par with or even improve the monolingual model. The cross-lingual performance is still lower than other models, which indicates that cross-lingual conversation modeling is very challenging. The main contributions of this paper are summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Jp", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We present the first multilingual non-goaloriented dialogue benchmark for evaluating multilingual generative chatbots.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Jp", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We provide both cross-lingual and multilingual baselines and discuss their limitations to inspire future research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Jp", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We show the potential of multilingual systems to understand the mixed language dialogue context and generate coherent responses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Jp", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Dialogue Systems are categorized as goaloriented and chit-chat. Interested readers may refer to Gao et al. (2018) for a general overview. In this paper, we focus on the latter, for which, in recent years, several tasks and datasets have been proposed to ground the conversation on knowledge (Dinan et al., 2019b; Gopalakrishnan et al., 2019; Fan et al., 2019; Reddy et al., 2019; Moon et al., 2019 ) such as Wiki-Articles, Reddit-Post, and CNN-Article. In this work, we focus on personalized dialogue agents where the dialogues are grounded on persona information. Li et al. (2016a) was the first to introduce a persona-grounded dialogue dataset for improving response consistency. Later on, Zhang et al. (2018) and Dinan et al. (2019a) introduced Persona-chat, a multi-turn conversational dataset, where two speakers are paired, and a persona description (4-5 sentences) is randomly assigned to each of them. By conditioning the response generation on the persona descriptions, a chit-chat model is able to produce a more persona-consistent dialogue (Zhang et al., 2018) . Several works have improved on the initial baselines with various methodologies, especially using large pre-trained models (Wolf et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 113, |
|
"text": "Gao et al. (2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 312, |
|
"text": "(Dinan et al., 2019b;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 341, |
|
"text": "Gopalakrishnan et al., 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 359, |
|
"text": "Fan et al., 2019;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 379, |
|
"text": "Reddy et al., 2019;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 397, |
|
"text": "Moon et al., 2019", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 582, |
|
"text": "Li et al. (2016a)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 692, |
|
"end": 711, |
|
"text": "Zhang et al. (2018)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 716, |
|
"end": 736, |
|
"text": "Dinan et al. (2019a)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1051, |
|
"end": 1071, |
|
"text": "(Zhang et al., 2018)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 1197, |
|
"end": 1216, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Multilingual Extensive approaches have been introduced to construct multilingual systems, for example, multilingual semantic role labeling (Akbik et al., 2015) Table 2 : The statistics of the collected dataset. We report the number of dialogues (#Dial.) and utterances (#Utt.) of the validation and test set in six languages. Edit distance per dialogue (Edit) and BLEU score are computed to show the difference between the human-annotated dataset and auto-translated dataset (Training set is reported in Appendix A). The BLEU score also reflects the quality of machine translated dialogues.", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 159, |
|
"text": "(Akbik et al., 2015)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 167, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "lation (Johnson et al., 2017) , and multilingual automatic speech recognition (Toshniwal et al., 2018) . Multilingual deep contextualized model, such as Multilingual BERT (M-BERT) (Devlin et al., 2018) , MT5 (Xue et al., 2021) , MBART (Liu et al., 2020) have been commonly used to represent multiple languages and elevate the performance in many NLP applications, such as classification tasks (Pires et al., 2019) , textual entailment, named entity recognition (K et al., 2020) , and natural language understanding. Multilingual datasets have also been created for a number of NLP tasks, such as named entity recognition or linking (Pan et al., 2017; Aguilar et al., 2018 ), question answering , dialogue state tracking (Mrk\u0161i\u0107 et al., 2017) , and natural language understanding (Schuster et al., 2019) . However, none of these datasets include the multilingual chit-chat task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 29, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 102, |
|
"text": "(Toshniwal et al., 2018)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 201, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 226, |
|
"text": "(Xue et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 253, |
|
"text": "(Liu et al., 2020)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 413, |
|
"text": "(Pires et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 477, |
|
"text": "(K et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 650, |
|
"text": "(Pan et al., 2017;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 671, |
|
"text": "Aguilar et al., 2018", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 720, |
|
"end": 741, |
|
"text": "(Mrk\u0161i\u0107 et al., 2017)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 779, |
|
"end": 802, |
|
"text": "(Schuster et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Cross-lingual Cross-lingual adaptation learns the inter-connections among languages and circumvents the requirement of extensive training data in target languages (Wisniewski et al., 2014; Zhang et al., 2016) . Cross-lingual transfer learning methods have been applied to multiple NLP tasks, such as named entity recognition (Ni et al., 2017) , dialogue state tracking , part-ofspeech tagging (Wisniewski et al., 2014; Zhang et al., 2016; Kim et al., 2017) , and dependency parsing (Ahmad et al., 2019) . Meanwhile, Lample and Conneau (2019) and proposed pre-trained cross-lingual language models to align multiple language representations, achieving state-of-the-art results in many cross-lingual classification tasks. The aforementioned tasks focused on classification and sequence labeling, while instead, Chi et al. (2019) proposed to pre-train both the encoder and decoder of a sequence-to-sequence model (XNLG) to conduct cross-lingual generation tasks, namely, question generation and abstractive summarization. The latter is the closest to our task since it focuses on language generation; however cross-lingual dialogue generation has not yet been explored.", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 188, |
|
"text": "(Wisniewski et al., 2014;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 208, |
|
"text": "Zhang et al., 2016)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 342, |
|
"text": "(Ni et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 418, |
|
"text": "(Wisniewski et al., 2014;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 438, |
|
"text": "Zhang et al., 2016;", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 456, |
|
"text": "Kim et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 502, |
|
"text": "(Ahmad et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 541, |
|
"text": "Lample and Conneau (2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 809, |
|
"end": 826, |
|
"text": "Chi et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The proposed XPersona dataset is an extension of the persona-chat dataset (Zhang et al., 2018; Dinan et al., 2019a) . Specifically, we extend ConvAI2 (Dinan et al., 2019a) to six languages: Chinese, French, Indonesian, Italian, Korean, and Japanese. Since the test set of ConvAI2 is hidden, we split the original validation set into a new validation set and test sets. Then, we firstly automatically translate the training, validation, and test set using APIs (PapaGo for Korean, Google Translate for other languages). For each language, we hired native speaker annotators with at least a bachelor degree and a fluent level of English and asked them to revise the machine-translated dialogues and persona sentences in the validation set and test set according to original English dialogues. The main goal of human annotation is to ensure the revised conversations are coherent and fluent in target language despite the cultural discrepancy in different languages. Therefore, annotators are not restricted to translate the English dialogues. They are also allowed to customize dialogues and persona sentences. The annotated dialogues can deviate from original translation while retain persona and conversation consistency. the dialogues by leveraging translation APIs has multiple advantages. First, it increases the data distribution similarity across languages (Conneau et al., 2018) , which can better examine the system's cross-lingual transferability. Second, revising the machine-translated dialogues based on the original English dialogue improves the data construction efficiency. Third, it leverages the well-constructed English persona conversations as a reference to ensure the dialogue quality without the need for training a new pool of workers to generate new samples (Conneau et al., 2018 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 94, |
|
"text": "(Zhang et al., 2018;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 95, |
|
"end": 115, |
|
"text": "Dinan et al., 2019a)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1362, |
|
"end": 1384, |
|
"text": "(Conneau et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1781, |
|
"end": 1802, |
|
"text": "(Conneau et al., 2018", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "On the other hand, human-translating the entire training-set (\u223c130K utterances) in six languages is expensive. Therefore, we propose an iterative method to improve the quality of the automatically translated training set. We firstly sample 200 dialogues from the training set (\u223c2600 utterances) in each language, and we assign human annotators to list all frequent translation mistakes in the given dialogues. For example, daily colloquial English expressions such as \"cool\", \"I see\", and \"lol\" are usually literally translated. After that, we use a simple string matching to revise the inappropriate translations 2 in the whole training-set and return a revision log, which records all the revised utterances. Then, we assign human annotators to check all the revised utterances and list translation mistakes again. We repeat this process at least twice for each language. Finally, we summarize the statistics of the collected dataset in Table 2 . 2 The list of corrections and matching rules are reported in Appendix.", |
|
"cite_spans": [ |
|
{ |
|
"start": 949, |
|
"end": 950, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 939, |
|
"end": 946, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3" |
|
}, |
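
{

"text": "The following is a minimal sketch of one such revision pass, assuming the dialogues are stored as lists of utterance strings; the helper name, the data layout, and the correction table are illustrative assumptions, not the released code.\ndef revise_training_set(dialogues, corrections):\n    # dialogues: list of {'id': ..., 'utterances': [str, ...]} (assumed layout)\n    # corrections: dict mapping a frequent mistranslation to its revision\n    revision_log = []\n    for dial in dialogues:\n        for i, utt in enumerate(dial['utterances']):\n            revised = utt\n            for wrong, right in corrections.items():\n                revised = revised.replace(wrong, right)  # simple string matching\n            if revised != utt:\n                revision_log.append((dial['id'], i, utt, revised))\n                dial['utterances'][i] = revised\n    return revision_log  # annotators re-check exactly these revised utterances",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Collection",

"sec_num": null

},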
|
{ |
|
"text": "Let us define a dialogue D = {U 1 , S 1 , U 2 , S 2 , . . . , U n , S n } as an alternating set of utterances from two speakers, where U and S represent the user and the system, respectively. Each speaker has its corresponding persona description that consists of a set of sentences P = {P 1 , . . . , P m }. Given the system persona sentences P s , dialogue history U \u2264k , S <k , and response language l, we are interested in predicting the next system utterances S k with model f (\u03b8).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Personalized Conversational Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "S k = f (U \u2264k , S <k , l; \u03b8)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Multilingual Personalized Conversational Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We explore both encoder-decoder and causal decoder architectures, and we leverage existing pretrained contextualized multilingual language models as weights initialization. Hence, we firstly define the multilingual embedding layer and then the two multilingual models used in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Embedding We define three embedding matrices: word embedding E W \u2208 R |V |\u00d7d , positional embedding E P \u2208 R M \u00d7d , and segmentation embedding E S \u2208 R |S|\u00d7d , where |.| denotes set cardinality, d is the embedding size, V denotes the vocabulary, M denotes the maximum sequence length, and S denotes the set of segmentation tokens. Segmentation embedding (Wolf et al., 2019) is used to indicate whether the current token is part of i) Persona sentences, ii) System (Sys.) utter-ances, iii) User utterances, iv) response in Language l. The language embedding l id is used to inform the model which language to generate. Hence, given a sequence of tokens X, the embedding functions E are defined as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 351, |
|
"end": 370, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "E(X) = E W (X) \u2295 E P (X pos ) \u2295 E S (X seg ),", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where \u2295 denotes the positional sum, X pos = {1, . . . , |X|} and X seg is the sequence of segmentation tokens, as in Wolf et al. (2019) . Figure 1 shows a visual representation of the embedding process. A more detailed illustration is reported in Appendix B.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 135, |
|
"text": "Wolf et al. (2019)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 146, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
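
{

"text": "A minimal PyTorch sketch of the embedding layer in Eq. (2), following the notation above; the class and argument names are ours, not the authors' released implementation.\nimport torch\nimport torch.nn as nn\n\nclass MultilingualEmbedding(nn.Module):\n    def __init__(self, vocab_size, max_len, num_segments, d):\n        super().__init__()\n        self.word = nn.Embedding(vocab_size, d)   # E_W\n        self.pos = nn.Embedding(max_len, d)       # E_P\n        self.seg = nn.Embedding(num_segments, d)  # E_S (persona / Sys / Usr / language ids)\n\n    def forward(self, x, x_seg):\n        # x, x_seg: (batch, seq_len) token indices and segmentation indices\n        x_pos = torch.arange(x.size(1), device=x.device).unsqueeze(0)\n        return self.word(x) + self.pos(x_pos) + self.seg(x_seg)  # positional sum of Eq. (2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": null

},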
|
{ |
|
"text": "Encoder-Decoder To model the response generation, we use a Transformer (Vaswani et al., 2017) based encoder-decoder (Vinyals and Le, 2015) . As illustrated in Figure 1 , we concatenate 3 the system persona P s with the dialogue history U \u2264k , S <k . Then we use the embedding layer E to finally pass it to the encoder. In short, we have:", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 93, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 138, |
|
"text": "(Vinyals and Le, 2015)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 167, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "H = Encoder(E([P s , U \u2264k , S <k ])),", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where H \u2208 R L\u00d7d model is the hidden representation computed by the encoder, and L denotes the input sequence length. Then, the decoder attends to H and generates the system response S k token by token. In the decoder, segmentation embedding is the language ID embedding (e.g., we look up the embedding for Italian to decode Italian). Thus:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "S k = Decoder(H, l),", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Causal Decoder As an alternative to encoderdecoders, the causal-decoders (Radford et al., 2018 (Radford et al., , 2019 He et al., 2018) have been used to model conversational responses (Wolf et al., 2019; by giving as a prefix the dialogue history. In our model, we concatenate the persona P s and the dialogue history U \u2264k , S <k as the language model prefix, and autoregressively decode the system response S k based on language embedding: Figure 1 shows the conceptual differences between the encoder-decoder and casual decoder. Note that in both multilingual models, the dialogue history encoding process is language-agnostic, while decoding language is controlled by the language embedding. Such design allows the model 3 [a; b] denotes concatenating the vectors a and b to understand mixed-language dialogue contexts and to respond in the desired language (details in Section 5.3.2).", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 94, |
|
"text": "(Radford et al., 2018", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 95, |
|
"end": 118, |
|
"text": "(Radford et al., , 2019", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 119, |
|
"end": 135, |
|
"text": "He et al., 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 185, |
|
"end": 204, |
|
"text": "(Wolf et al., 2019;", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 442, |
|
"end": 450, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "S k = Decoder(E([P s , U \u2264k , S <k ]), l). (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
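
{

"text": "To make Eq. (5) concrete, the sketch below flattens the persona and the dialogue history into the language-model prefix with per-token segmentation labels; the segment ids are hypothetical placeholders.\n# Hypothetical segment ids: 0 = persona, 1 = user, 2 = system; the target\n# language id labels the response tokens decoded after this prefix (Eq. 5).\ndef build_causal_prefix(persona_ids, history_ids, history_roles):\n    tokens, segments = [], []\n    for sent in persona_ids:          # persona sentences P^s\n        tokens += sent\n        segments += [0] * len(sent)\n    for sent, role in zip(history_ids, history_roles):  # U_{<=k}, S_{<k}\n        tokens += sent\n        segments += [role] * len(sent)\n    return tokens, segments",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": null

},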
|
{ |
|
"text": "We consider two training strategies to learn a multilingual conversational model: multilingual training and cross-lingual training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Strategy", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Multilingual Training jointly learns personalized conversations in multiple languages. We follow a transfer learning approach (Wolf et al., 2019) by initializing our models with the weights of the large multilingual pretrained model M-Bert (Pires et al., 2019) . For the causal decoder, we add the causal mask into self-attention layer to convert M-Bert encoder to decoder. For encoder-decoder model, we randomly initialize the cross encoderdecoder attention (Rothe et al., 2019) . Then, we train the both models on the combined training set in all 7 languages using cross-entropy loss.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 145, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 260, |
|
"text": "(Pires et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 479, |
|
"text": "(Rothe et al., 2019)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Strategy", |
|
"sec_num": "4.2" |
|
}, |
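
{

"text": "A generic sketch of the causal masking that converts the bidirectional M-BERT encoder into a decoder; this illustrates the standard technique rather than the authors' code. An upper-triangular mask keeps each position from attending to future tokens.\nimport torch\n\ndef causal_attention_mask(seq_len):\n    # True above the diagonal: position i may attend only to positions <= i\n    return torch.triu(torch.ones(seq_len, seq_len), diagonal=1).bool()\n\n# usage inside each self-attention layer, before the softmax:\n# scores = scores.masked_fill(causal_attention_mask(scores.size(-1)), float('-inf'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Strategy",

"sec_num": null

},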
|
{ |
|
"text": "Cross-lingual Training transfers knowledge from the source language data to the target languages. In this setting, the model is trained on English (source language) conversational samples, and evaluated on the other 6 languages. Following the methodology proposed by Chi et al. (2019) , we align the embedded representations of different languages into the same embedding space by applying cross-lingual pre-training to the encoder-decoder model. The pre-training procedure consists of two stages:", |
|
"cite_spans": [ |
|
{ |
|
"start": 267, |
|
"end": 284, |
|
"text": "Chi et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Strategy", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 pre-training the encoder and the decoder independently utilizing masked language modeling, as in Lample and Conneau (2019);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Strategy", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 jointly pre-training the encoder-decoder by using two objective functions: Cross-Lingual Auto-Encoding (XAE) and Denoising Auto-Encoding (DAE) (Chi et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 163, |
|
"text": "(Chi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Strategy", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For instance, DAE adds perturbations to the input sentence of encoder and tries to reconstructs the original sentence using the decoder, whereas, XAE uses parallel translation data to pre-train both the encoder and decoder with machine translation objective. As in the multilingual models, the language IDs are fed into the decoder to control the language of generated sentences. Both pre-training stages require both parallel and non-parallel data in the target language. After the two stages of pre-training, the model is fine-tuned using just the source language samples (i.e., English) with the same cross-entropy loss as for the multilingual training. However, as suggested in Chi et al. (2019) , only the encoder parameters are updated with back-propagation and both the decoder and the word embedding layer remain frozen. This retains the decoders' ability to generate multilingual output while still being able to learn new tasks using only the target language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 682, |
|
"end": 699, |
|
"text": "Chi et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Strategy", |
|
"sec_num": "4.2" |
|
}, |
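
{

"text": "A sketch of this fine-tuning setup, which freezes the decoder and the word embeddings while updating only the encoder; the module names assume an XNLG-style encoder-decoder and are not taken from the released code.\ndef freeze_for_crosslingual_finetuning(model):\n    # Freeze everything, then unfreeze the encoder body only, so the decoder\n    # keeps its multilingual generation ability while the new task is learned.\n    for p in model.parameters():\n        p.requires_grad = False\n    for p in model.encoder.parameters():\n        p.requires_grad = True\n    for p in model.encoder.embed_tokens.parameters():  # word embeddings stay frozen\n        p.requires_grad = False",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Strategy",

"sec_num": null

},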
|
{ |
|
"text": "Evaluating open-domain chit-chat models is challenging, especially in multiple languages and at the dialogue-level. Hence, we evaluate our models using both automatic and human evaluation. In both cases, human-annotated dialogues are used, which show the importance of the provided dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Automatic For each language, we evaluate responses generated by the models using perplexity (ppl.) and BLEU (Papineni et al., 2002) with reference to the human-annotated responses. Although these automatic measures are not perfect (Liu et al., 2016) , they help to roughly estimate the performance of different models under the same test set. More recently, Adiwardana et al. (2020) has shown the correlation between perplexity and human judgment in open-domain chit-chat models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 131, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 249, |
|
"text": "(Liu et al., 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 382, |
|
"text": "Adiwardana et al. (2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "5.1" |
|
}, |
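
{

"text": "A minimal sketch of the two automatic metrics, computed against the human-annotated references; tokenization details are simplified and the function names are ours.\nimport math\nfrom nltk.translate.bleu_score import corpus_bleu, SmoothingFunction\n\ndef perplexity(total_neg_log_likelihood, total_tokens):\n    # ppl = exp(average negative log-likelihood per reference token)\n    return math.exp(total_neg_log_likelihood / total_tokens)\n\ndef bleu(references, hypotheses):\n    # references, hypotheses: lists of token lists, one pair per test response\n    return corpus_bleu([[ref] for ref in references], hypotheses,\n                       smoothing_function=SmoothingFunction().method1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Metrics",

"sec_num": null

},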
|
{ |
|
"text": "Human Asking humans to evaluate the quality of a dialogue model is challenging, especially when multiple models have to be compared. The likert score (a.k.a. 1 to 5 scoring) has been widely used to evaluate the interactive experience with conversational models (Venkatesh et al., 2018; See et al., 2019; Zhang et al., 2018; Dinan et al., 2019a) . In such evaluation, a human interacts with the systems for several turns, and then they assign a score from 1 to 5 based on three questions (Zhang et al., 2018) about fluency, engagingness, and consistency. This evaluation is both expensive to conduct and requires many samples to achieve statistically significant results . To cope with these issues, proposed ACUTE-EVAL, an evaluation for dialogue systems. The authors proposed two modes: human-model chats and selfchat (Li et al., 2016b; Ghandeharioun et al., 2019) . In this work, we opt for the latter since it is cheaper to conduct and achieves similar results to the former. Another advantage of using this method is the ability to evaluate multi-turn conversations instead of single-turn responses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 285, |
|
"text": "(Venkatesh et al., 2018;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 303, |
|
"text": "See et al., 2019;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 304, |
|
"end": 323, |
|
"text": "Zhang et al., 2018;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 344, |
|
"text": "Dinan et al., 2019a)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 487, |
|
"end": 507, |
|
"text": "(Zhang et al., 2018)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 837, |
|
"text": "(Li et al., 2016b;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 838, |
|
"end": 865, |
|
"text": "Ghandeharioun et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Following ACUTE-EVAL, the annotator is provided with two full dialogues made by self-chat or human-dialogue. The annotator is asked to choose which of the two dialogues is better in terms of engagingness, interestingness, and humanness. For each comparison, we sample 60-100 conversations from both models. In Appendix C, we report the exact questions and instructions given to the annotators, and the user interface used in the evaluation. We hired at least 10 annotators for each considered language, the annotators are either native speakers or linguists in corresponding language. The annotators were different from the dataset collection annotators to avoid any possible bias.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Multilingual Models We use the \"BERT-Base, Multilingual Cased\" checkpoint, and we denote the multilingual encoder-decoder model as M-Bert2Bert (\u223c220M parameters) and causal decoder model as M-CausalBert (\u223c110M parameters). We fine-tune both models in the combined training set (English in Persona-chat (Zhang et al., 2018) , six languages in Xpersona) for five epochs with AdamW optimizer and a learning rate of 6.25e-5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 322, |
|
"text": "(Zhang et al., 2018)", |
|
"ref_id": "BIBREF50" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Monolingual Models To verify whether the multilingual agent will under-perform the monolingual agent in the monolingual conversational task, we build a monolingual encoder-decoder model and causal decoder model for each language. For a fair comparison, we initialize the monolingual models with a pre-trained monolingual BERT 4 (Devlin et al., 2018; Cui et al., 2019; Martin et al., 2019) . We denote the monolingual encoder-decoder model as Bert2Bert (\u223c220M parameters) and causal decoder model as CausalBert (\u223c110M parameters). Then we fine-tune each model in each language independently for the same number of epoch and optimizer as the multilingual model. Our CausalBert model achieve 16.08 perplexity, which is similar to 17.51 of the GPT based models (Wolf et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 328, |
|
"end": 349, |
|
"text": "(Devlin et al., 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 350, |
|
"end": 367, |
|
"text": "Cui et al., 2019;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 388, |
|
"text": "Martin et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 757, |
|
"end": 776, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Translation-based Models Another strong baseline we compare with is Poly-encoder (Humeau et al., 2019) , a large-scale pre-trained retrieval model that fine-tuned on English Persona-chat, has shown state-of-the-art performance in the ConvAI dataset Humeau et al., 2019) . We adapt this model to the other languages by using the Google Translate API to translate target languages (e.g., Chinese) query to English as the input to the model, then translate the English response back to the target language. Thus, the response generation flow is: target query \u2192 English query \u2192 English response \u2192 target response. We denote this model as Poly.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 102, |
|
"text": "(Humeau et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 269, |
|
"text": "Humeau et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "5.2" |
|
}, |
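
{

"text": "A sketch of this three-stage pipeline; translate() and the retrieval call stand in for the Google Translate API and the pre-trained Poly-encoder, and both interfaces are assumptions.\ndef translated_response(query, lang, translate, poly_encoder):\n    # target query -> English query -> English response -> target response\n    en_query = translate(query, src=lang, tgt='en')\n    en_response = poly_encoder.retrieve(en_query)\n    return translate(en_response, src='en', tgt=lang)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implementation Details",

"sec_num": null

},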
|
{ |
|
"text": "Cross-lingual Models. In the first pre-training stage, we use the pre-trained weights from XLMRbase . Then, we follow the second pre-training stage of XNLG (Chi et al., 2019) for pre-training Italian, Japanese, Korean, Indonesia cross-lingual transferable models. For Chinese and French, we directly apply the pretrained XNLG (Chi et al., 2019) weights 5 . Then, the pre-trained models are fine-tune on English Per-sonaChat training set and early stop based on the perplexity on target language validation set. Table 3 compares monolingual, multilingual, and cross-lingual models in terms of BLEU and perplexity in the human-translated test set. On both evaluation matrices, the causal decoder models outperform the encoder-decoder models. We observe that the encoder-decoder model tends to overlook dialogue context and generate digressive responses. (Generated samples are available in Appendix D)", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 174, |
|
"text": "(Chi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 344, |
|
"text": "(Chi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 511, |
|
"end": 518, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We hypothesize that this is because the one-tomany problem (Zhao et al., 2017) in open-domain conversation weakens the relation between encoder and decoder; thus the well pre-trained decoder (Bert) easily converges to a local optimum, and learns to ignore the dialogue context from the encoder and generate the response in an unconditional language model way. We leave the investigation of this problem to future work. On the other hand, M-CausalBert achieves a comparable or slightly better performance compared to CausalBert, which suggests that M-CausalBert leverages the data from other languages. As expected, we observe a significant gap between the cross-lingual model and other models, which indicates that cross-lingual zero-shot conversation modeling is very challenging. Table 4 shows the human evaluation result of comparing M-CausalBert (Multi) against the human, translation-based Poly-encoder (Poly), and monolingual CausalBert (Mono). The results illustrate that Multi outperforms Mono in English and Chinese, and is on par with Mono in other languages. On the other hand, Poly shows a strong performance in English as it was pre-trained with a large-scale English conversation corpus. In contrast, the performance of Poly drops in other languages, which indicates that the imperfect translation affects translation-based systems. We also conduct M-CausalBert against XNLG (cross) human evaluation, and Multi achieve nearly 100 percent winning rate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 78, |
|
"text": "(Zhao et al., 2017)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 782, |
|
"end": 789, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "5.3.1" |
|
}, |
|
{ |
|
"text": "We randomly sample 7 self-chat dialogues for each baseline model in the seven languages and report them in Appendix D., And we summarize the generation of each model as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis and Discussion", |
|
"sec_num": "5.3.2" |
|
}, |
|
{ |
|
"text": "Poly Poly-encoder, pretrained on 174 million Reddit data and fine tuned on English Persona-Chat, can accurately retrieve coherent and diverse responses in English. However, in the other six languages, some of the retrieved responses are digressive due to translation errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis and Discussion", |
|
"sec_num": "5.3.2" |
|
}, |
|
{ |
|
"text": "We observe that both the monolingual and multilingual models can generate fluent responses. Compared to Bert2Bert and M-Bert2Bert, CausalBert and M-CausalBert can generate more on-topic responses but sometimes repeat through turns. CausalBert and M-CausalBert are on par with each other in monolingual conversational tasks, while M-CausalBert shows the advantage of handling a mixed-language context. For multilingual speakers, the conversation may involve multiple languages. Therefore, we test M-CausalBert with extra-Sentential code-switching context. Table 5 shows that M-CausalBert can understand the mixed-language context, and decode coherent responses. In Appendix C.1, we report more generation examples, and we also show that the response language of M-CausalBert can be control by language embeddings. Usr \u1102 \u1161\u1102 \u1173 \u11ab \u1110 \u1175\u1107 \u1175 \u1107 \u1169\u1102 \u1173 \u11ab \u1100 \u1165 \u11ba\u110b \u1173 \u11af \u110c \u1169 \u11c2\u110b \u1161\u1112 \u1162.(I like to watch tv) Sys i really like hiking and listening to music Cross-lingual. The current state-of-the-art crosslingual generation approach XNLG (Chi et al., 2019) shows inferior performance on multi-turn dialogue tasks, and generates repetitive responses. Although cross-lingual dialogue generation is challenging, it reduces the human effort for data annotation in different languages. Therefore, the crosslanguage transfer is an important direction to investigate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1016, |
|
"end": 1034, |
|
"text": "(Chi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 555, |
|
"end": 562, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Monolingual & Multilingual", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we studied both cross-lingual and multilingual approaches in end-to-end personalized dialogue modeling. We presented the XPersona dataset, a multilingual extension of Persona-Chat, for evaluating the multilingual personalized chatbots. We further provided both cross-lingual and multilingual baselines and compared them with the monolingual approach and two-stage translation approach. Extensive automatic evaluation and human evaluation were conducted to examine the models' performance. The experimental results showed that multilingual trained models, with a single model across multiple languages, can outperform the twostage translation approach and is on par with monolingual models. On the other hand, the current stateof-the-art cross-lingual approach XNLG achieved lower performance than other baselines. In future work, we plan to research a more advanced crosslingual generation approach and construct a mixedlanguage conversational benchmark for evaluating multilingual systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
|
{ |
|
"text": "The monolingual BERT pre-trained models are available in https://github.com/huggingface/transformers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Available in https://github.com/CZWin32768/XNLG", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Towards a human-like open-domain chatbot", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Adiwardana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Fiedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romal", |
|
"middle": [], |
|
"last": "Thoppilan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apoorv", |
|
"middle": [], |
|
"last": "Kulshreshtha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Nemade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifeng", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.09977" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Adiwardana, Minh-Thang Luong, David R So, Jamie Hall, Noah Fiedel, Romal Thoppilan, Zi Yang, Apoorv Kulshreshtha, Gaurav Nemade, Yifeng Lu, et al. 2020. Towards a human-like open-domain chatbot. arXiv preprint arXiv:2001.09977.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Named entity recognition on code-switched data: Overview of the calcs 2018 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Gustavo", |
|
"middle": [], |
|
"last": "Aguilar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fahad", |
|
"middle": [], |
|
"last": "Alghamdi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Soto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hirschberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thamar", |
|
"middle": [], |
|
"last": "Solorio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Workshop on Computational Approaches to Linguistic Code-Switching", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "138--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gustavo Aguilar, Fahad AlGhamdi, Victor Soto, Mona Diab, Julia Hirschberg, and Thamar Solorio. 2018. Named entity recognition on code-switched data: Overview of the calcs 2018 shared task. In Proceed- ings of the Third Workshop on Computational Ap- proaches to Linguistic Code-Switching, pages 138- 147.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "On difficulties of cross-lingual transfer with order differences: A case study on dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Wasi", |
|
"middle": [], |
|
"last": "Ahmad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhisong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuezhe", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2440--2452", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wasi Ahmad, Zhisong Zhang, Xuezhe Ma, Eduard Hovy, Kai-Wei Chang, and Nanyun Peng. 2019. On difficulties of cross-lingual transfer with order differ- ences: A case study on dependency parsing. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long and Short Papers), pages 2440-2452.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Generating high quality proposition banks for multilingual semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Akbik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Chiticariu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Danilevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunyao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shivakumar", |
|
"middle": [], |
|
"last": "Vaithyanathan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huaiyu", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "397--407", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Akbik, Laura Chiticariu, Marina Danilevsky, Yun- yao Li, Shivakumar Vaithyanathan, and Huaiyu Zhu. 2015. Generating high quality proposition banks for multilingual semantic role labeling. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 397-407.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Xlnbt: A cross-lingual neural belief tracking framework", |
|
"authors": [ |
|
{ |
|
"first": "Wenhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianshu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xifeng", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "414--424", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenhu Chen, Jianshu Chen, Yu Su, Xin Wang, Dong Yu, Xifeng Yan, and William Yang Wang. 2018. Xl- nbt: A cross-lingual neural belief tracking frame- work. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 414-424.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Cross-lingual natural language generation via pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Zewen", |
|
"middle": [], |
|
"last": "Chi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhui", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian-Ling", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.10481" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zewen Chi, Li Dong, Furu Wei, Wenhui Wang, Xian- Ling Mao, and Heyan Huang. 2019. Cross-lingual natural language generation via pre-training. arXiv preprint arXiv:1909.10481.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Unsupervised cross-lingual representation learning at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikay", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.02116" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Xnli: Evaluating crosslingual sentence representations", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruty", |
|
"middle": [], |
|
"last": "Rinott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2475--2485", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Ruty Rinott, Guillaume Lample, Ad- ina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. Xnli: Evaluating cross- lingual sentence representations. In Proceedings of the 2018 Conference on Empirical Methods in Natu- ral Language Processing, pages 2475-2485.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Pre-training with whole word masking for chinese bert", |
|
"authors": [ |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziqing", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shijin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoping", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, and Guoping Hu. 2019. Pre-training with whole word masking for chinese bert. arXiv preprint arXiv:1906.08101.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The second conversational intelligence challenge (convai2)", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varvara", |
|
"middle": [], |
|
"last": "Logacheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valentin", |
|
"middle": [], |
|
"last": "Malykh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Urbanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Szlam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iulian", |
|
"middle": [], |
|
"last": "Serban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Lowe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1902.00098" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Dinan, Varvara Logacheva, Valentin Malykh, Alexander Miller, Kurt Shuster, Jack Urbanek, Douwe Kiela, Arthur Szlam, Iulian Serban, Ryan Lowe, et al. 2019a. The second conversational intelligence challenge (convai2). arXiv preprint arXiv:1902.00098.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Wizard of wikipedia: Knowledge-powered conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2019b. Wiz- ard of wikipedia: Knowledge-powered conversa- tional agents. In International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Amazon launches multilingual mode for using alexa in multiple languages at once", |
|
"authors": [ |
|
{ |
|
"first": "Darrell", |
|
"middle": [ |
|
"Etherington" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Darrell Etherington. 2019. Amazon launches multilin- gual mode for using alexa in multiple languages at once.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Eli5: Long form question answering", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ethan", |
|
"middle": [], |
|
"last": "Perez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.09190" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Yacine Jernite, Ethan Perez, David Grang- ier, Jason Weston, and Michael Auli. 2019. Eli5: Long form question answering. arXiv preprint arXiv:1907.09190.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Neural approaches to conversational ai", |
|
"authors": [ |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lihong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1371--1374", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianfeng Gao, Michel Galley, and Lihong Li. 2018. Neural approaches to conversational ai. In The 41st International ACM SIGIR Conference on Re- search & Development in Information Retrieval, pages 1371-1374. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Approximating interactive human evaluation with self-play for open-domain dialog systems", |
|
"authors": [ |
|
{ |
|
"first": "Asma", |
|
"middle": [], |
|
"last": "Ghandeharioun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judy", |
|
"middle": [ |
|
"Hanwen" |
|
], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natasha", |
|
"middle": [], |
|
"last": "Jaques", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Craig", |
|
"middle": [], |
|
"last": "Ferguson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Lapedriza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rosalind", |
|
"middle": [], |
|
"last": "Picard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13658--13669", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Asma Ghandeharioun, Judy Hanwen Shen, Natasha Jaques, Craig Ferguson, Noah Jones, Agata Lapedriza, and Rosalind Picard. 2019. Approximat- ing interactive human evaluation with self-play for open-domain dialog systems. In Advances in Neu- ral Information Processing Systems, pages 13658- 13669.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Topical-chat: Towards knowledge-grounded open-domain conversations", |
|
"authors": [ |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Gopalakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinlang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Gottardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Kwatra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anu", |
|
"middle": [], |
|
"last": "Venkatesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raefer", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-T\u00fcr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1891--1895", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karthik Gopalakrishnan, Behnam Hedayatnia, Qin- lang Chen, Anna Gottardi, Sanjeev Kwatra, Anu Venkatesh, Raefer Gabriel, Dilek Hakkani-T\u00fcr, and Amazon Alexa AI. 2019. Topical-chat: To- wards knowledge-grounded open-domain conversa- tions. Proc. Interspeech 2019, pages 1891-1895.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Layer-wise coordination between encoder and decoder for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingce", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhibo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7944--7954", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyu He, Xu Tan, Yingce Xia, Di He, Tao Qin, Zhibo Chen, and Tie-Yan Liu. 2018. Layer-wise coordi- nation between encoder and decoder for neural ma- chine translation. In Advances in Neural Informa- tion Processing Systems, pages 7944-7954.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Poly-encoders: Transformer architectures and pre-training strategies for fast and accurate multi-sentence scoring", |
|
"authors": [ |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Humeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Anne", |
|
"middle": [], |
|
"last": "Lachaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "External Links: Link Cited by", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "2--2", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel Humeau, Kurt Shuster, Marie-Anne Lachaux, and Jason Weston. 2019. Poly-encoders: Trans- former architectures and pre-training strategies for fast and accurate multi-sentence scoring. CoRR abs/1905.01969. External Links: Link Cited by, 2:2- 2.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Google's multilingual neural machine translation system: Enabling zero-shot translation", |
|
"authors": [ |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Thorat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernanda", |
|
"middle": [], |
|
"last": "Vi\u00e9gas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wattenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "339--351", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melvin Johnson, Mike Schuster, Quoc Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Vi\u00e9gas, Martin Wattenberg, Greg Corrado, et al. 2017. Google's multilingual neural machine translation system: Enabling zero-shot translation. Transactions of the Association for Computational Linguistics, 5:339-351.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Cross-lingual ability of multilingual bert: An empirical study", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Karthikeyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Mayhew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karthikeyan K, Zihan Wang, Stephen Mayhew, and Dan Roth. 2020. Cross-lingual ability of multilin- gual bert: An empirical study. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Cross-lingual transfer learning for pos tagging without cross-lingual resources", |
|
"authors": [ |
|
{ |
|
"first": "Joo-Kyung", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Young-Bum", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruhi", |
|
"middle": [], |
|
"last": "Sarikaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Fosler-Lussier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2832--2838", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joo-Kyung Kim, Young-Bum Kim, Ruhi Sarikaya, and Eric Fosler-Lussier. 2017. Cross-lingual transfer learning for pos tagging without cross-lingual re- sources. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2832-2838.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.07291" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining. arXiv preprint arXiv:1901.07291.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Mlqa: Evaluating cross-lingual extractive question answering", |
|
"authors": [ |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barlas", |
|
"middle": [], |
|
"last": "Oguz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruty", |
|
"middle": [], |
|
"last": "Rinott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.07475" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick Lewis, Barlas Oguz, Ruty Rinott, Sebastian Riedel, and Holger Schwenk. 2019. Mlqa: Eval- uating cross-lingual extractive question answering. arXiv preprint arXiv:1910.07475.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A persona-based neural conversation model", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgios", |
|
"middle": [], |
|
"last": "Spithourakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "994--1003", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Georgios Sp- ithourakis, Jianfeng Gao, and Bill Dolan. 2016a. A persona-based neural conversation model. In Pro- ceedings of the 54th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 994-1003.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Deep reinforcement learning for dialogue generation", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.01541" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Will Monroe, Alan Ritter, Michel Galley, Jian- feng Gao, and Dan Jurafsky. 2016b. Deep rein- forcement learning for dialogue generation. arXiv preprint arXiv:1606.01541.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Acute-eval: Improved dialogue evaluation with optimized questions and multi-turn comparisons", |
|
"authors": [ |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.03087" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Margaret Li, Jason Weston, and Stephen Roller. 2019. Acute-eval: Improved dialogue evaluation with opti- mized questions and multi-turn comparisons. arXiv preprint arXiv:1909.03087.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "How not to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation", |
|
"authors": [ |
|
{ |
|
"first": "Chia-Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Lowe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iulian", |
|
"middle": [], |
|
"last": "Serban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Noseworthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Charlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joelle", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2122--2132", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1230" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chia-Wei Liu, Ryan Lowe, Iulian Serban, Mike Nose- worthy, Laurent Charlin, and Joelle Pineau. 2016. How not to evaluate your dialogue system: An em- pirical study of unsupervised evaluation metrics for dialogue response generation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2122-2132. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Xqa: A cross-lingual open-domain question answering dataset", |
|
"authors": [ |
|
{ |
|
"first": "Jiahua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yankai", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2358--2368", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiahua Liu, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2019. Xqa: A cross-lingual open-domain ques- tion answering dataset. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 2358-2368.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Multilingual denoising pre-training for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "726--742", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. Transac- tions of the Association for Computational Linguis- tics, 8:726-742.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "\u00c9ric Villemonte de la Clergerie", |
|
"authors": [ |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Muller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro Javier Ortiz", |
|
"middle": [], |
|
"last": "Su\u00e1rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoann", |
|
"middle": [], |
|
"last": "Dupont", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Romary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c9ric", |
|
"middle": [], |
|
"last": "Villemonte de la Clergerie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Djam\u00e9 Seddah, and Beno\u00eet Sagot. 2019. Camembert: a tasty french language model", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.03894" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis Martin, Benjamin Muller, Pedro Javier Ortiz Su\u00e1rez, Yoann Dupont, Laurent Romary, \u00c9ric Ville- monte de la Clergerie, Djam\u00e9 Seddah, and Beno\u00eet Sagot. 2019. Camembert: a tasty french language model. arXiv preprint arXiv:1911.03894.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Opendialkg: Explainable conversational reasoning with attention-based walks over knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Seungwhan", |
|
"middle": [], |
|
"last": "Moon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pararth", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anuj", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajen", |
|
"middle": [], |
|
"last": "Subba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "845--854", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seungwhan Moon, Pararth Shah, Anuj Kumar, and Ra- jen Subba. 2019. Opendialkg: Explainable conver- sational reasoning with attention-based walks over knowledge graphs. In Proceedings of the 57th An- nual Meeting of the Association for Computational Linguistics, pages 845-854.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Semantic specialization of distributional word vector spaces using monolingual and cross-lingual constraints", |
|
"authors": [ |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Mrk\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00d3", |
|
"middle": [], |
|
"last": "Diarmuid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ira", |
|
"middle": [], |
|
"last": "S\u00e9aghdha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Leviant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Ga\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "309--324", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikola Mrk\u0161i\u0107, Ivan Vuli\u0107, Diarmuid \u00d3 S\u00e9aghdha, Ira Leviant, Roi Reichart, Milica Ga\u0161i\u0107, Anna Korho- nen, and Steve Young. 2017. Semantic specializa- tion of distributional word vector spaces using mono- lingual and cross-lingual constraints. Transactions of the Association for Computational Linguistics, 5:309-324.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Weakly supervised cross-lingual named entity recognition via effective annotation and representation projection", |
|
"authors": [ |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Ni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgiana", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1470--1480", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jian Ni, Georgiana Dinu, and Radu Florian. 2017. Weakly supervised cross-lingual named entity recog- nition via effective annotation and representation projection. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1470-1480.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Crosslingual name tagging and linking for 282 languages", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoman", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Boliang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1946--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoman Pan, Boliang Zhang, Jonathan May, Joel Nothman, Kevin Knight, and Heng Ji. 2017. Cross- lingual name tagging and linking for 282 languages. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1946-1958.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting on association for compu- tational linguistics, pages 311-318. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "How multilingual is multilingual bert?", |
|
"authors": [ |
|
{ |
|
"first": "Telmo", |
|
"middle": [], |
|
"last": "Pires", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Schlinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4996--5001", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual bert? In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4996-5001.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Improving language understanding with unsupervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Salimans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Time Salimans, and Ilya Sutskever. 2018. Improving language un- derstanding with unsupervised learning. Technical report, OpenAI.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "OpenAI Blog", |
|
"volume": "1", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Coqa: A conversational question answering challenge", |
|
"authors": [ |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "249--266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siva Reddy, Danqi Chen, and Christopher D Manning. 2019. Coqa: A conversational question answering challenge. Transactions of the Association for Com- putational Linguistics, 7:249-266.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Leveraging pre-trained checkpoints for sequence generation tasks", |
|
"authors": [ |
|
{ |
|
"first": "Sascha", |
|
"middle": [], |
|
"last": "Rothe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aliaksei", |
|
"middle": [], |
|
"last": "Severyn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.12461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sascha Rothe, Shashi Narayan, and Aliaksei Sev- eryn. 2019. Leveraging pre-trained checkpoints for sequence generation tasks. arXiv preprint arXiv:1907.12461.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Cross-lingual transfer learning for multilingual task oriented dialog", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sonal", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rushin", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3795--3805", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Schuster, Sonal Gupta, Rushin Shah, and Mike Lewis. 2019. Cross-lingual transfer learning for multilingual task oriented dialog. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3795-3805.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "What makes a good conversation? how controllable attributes affect human judgments", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1902.08654" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Stephen Roller, Douwe Kiela, and Jason Weston. 2019. What makes a good conversation? how controllable attributes affect human judgments. arXiv preprint arXiv:1902.08654.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Multilingual speech recognition with a single end-to-end model", |
|
"authors": [ |
|
{ |
|
"first": "Shubham", |
|
"middle": [], |
|
"last": "Toshniwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Tara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Sainath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Moreno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kanishka", |
|
"middle": [], |
|
"last": "Weinstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4904--4908", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shubham Toshniwal, Tara N Sainath, Ron J Weiss, Bo Li, Pedro Moreno, Eugene Weinstein, and Kan- ishka Rao. 2018. Multilingual speech recognition with a single end-to-end model. In 2018 IEEE Inter- national Conference on Acoustics, Speech and Sig- nal Processing (ICASSP), pages 4904-4908. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "On evaluating and comparing conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Anu", |
|
"middle": [], |
|
"last": "Venkatesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Khatri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashwin", |
|
"middle": [], |
|
"last": "Ram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fenfei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raefer", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Nagar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rohit", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angeliki", |
|
"middle": [], |
|
"last": "Metallinou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "60--68", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1801.03625" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anu Venkatesh, Chandra Khatri, Ashwin Ram, Fen- fei Guo, Raefer Gabriel, Ashish Nagar, Rohit Prasad, Ming Cheng, Behnam Hedayatnia, Ange- liki Metallinou, et al. 2018. On evaluating and comparing conversational agents. arXiv preprint arXiv:1801.03625, 4:60-68.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "A neural conversational model", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1506.05869" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals and Quoc V Le. 2015. A neural conver- sational model. arXiv preprint arXiv:1506.05869.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Crosslingual part-of-speech tagging through ambiguous learning", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wisniewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "P\u00e9cheux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Souhir", |
|
"middle": [], |
|
"last": "Gahbiche-Braham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fran\u00e7ois", |
|
"middle": [], |
|
"last": "Yvon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1779--1785", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Wisniewski, Nicolas P\u00e9cheux, Souhir Gahbiche-Braham, and Fran\u00e7ois Yvon. 2014. Cross- lingual part-of-speech tagging through ambiguous learning. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1779-1785.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Transfertransfo: A transfer learning approach for neural network based conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.08149" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Victor Sanh, Julien Chaumond, and Clement Delangue. 2019. Transfertransfo: A transfer learning approach for neural network based conversational agents. arXiv preprint arXiv:1901.08149.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Aditya Barua, and Colin Raffel. 2021. mt5: A massively multilingual pre-trained text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Linting", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Constant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihir", |
|
"middle": [], |
|
"last": "Kale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rami", |
|
"middle": [], |
|
"last": "Al-Rfou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Siddhant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Barua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "483--498", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linting Xue, Noah Constant, Adam Roberts, Mi- hir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. 2021. mt5: A massively multilingual pre-trained text-to-text transformer. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 483-498.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Personalizing dialogue agents: I have a dog, do you have pets too?", |
|
"authors": [ |
|
{ |
|
"first": "Saizheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Urbanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Szlam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2204--2213", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Per- sonalizing dialogue agents: I have a dog, do you have pets too? In Proceedings of the 56th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 2204-2213. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Dialogpt: Large-scale generative pre-training for conversational response generation", |
|
"authors": [ |
|
{ |
|
"first": "Yizhe", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siqi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yen-Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.00536" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, and Bill Dolan. 2019. Dialogpt: Large-scale generative pre-training for conversational response generation. arXiv preprint arXiv:1911.00536.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Ten pairs to tagmultilingual pos tagging via coarse mapping between embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Gaddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1307--1317", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuan Zhang, David Gaddy, Regina Barzilay, and Tommi Jaakkola. 2016. Ten pairs to tag- multilingual pos tagging via coarse mapping be- tween embeddings. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 1307-1317.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Learning discourse-level diversity for neural dialog models using conditional variational autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Tiancheng", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ran", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxine", |
|
"middle": [], |
|
"last": "Eskenazi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "654--664", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tiancheng Zhao, Ran Zhao, and Maxine Eskenazi. 2017. Learning discourse-level diversity for neural dialog models using conditional variational autoen- coders. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 654-664.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"text": "(a) Multilingual Causal Decoder model. (b) Multilingual Encoder-Decoder model.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF3": { |
|
"text": "Results of automatic evaluation score on test set in seven languages. We compute the BLEU score and perplexity (ppl.) for monolingual, multilingual, and cross-lingual models.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Results of ACUTE-EVAL human evaluation. Tests are conducted pairwise between M-CausalBert (Multi.)</td></tr><tr><td>and other models (Human, Poly-encoder (Poly), Monolingual CausalBert (Mono)). Numbers indicate the win-</td></tr><tr><td>ning rate of M-CausalBert. Numbers in bold are statistically significant (p < 0.05).</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"text": "System PersonaMy favorite activity is rock climbing.I'm a mechanical engineer. I like to go hiking in the spring.I live in colorado.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Dialogue</td></tr><tr><td>Usr</td><td>studio inglese oggi e tu? (I am studying English today, you?)</td></tr><tr><td colspan=\"2\">Sys just watching netflix with friends</td></tr><tr><td>Usr</td><td>\u6211\u559c\u6b22\u6500\u5ca9\uff0c\u4f60\u5462\uff1f (I like rock climbing, what about you?)</td></tr><tr><td>Sys</td><td>nice, I enjoy it while in the summer. what are you up to?</td></tr><tr><td>Usr</td><td>saya ingin melakukan panjat tebing (I want to go to hike)</td></tr><tr><td colspan=\"2\">Sys I'm going hiking the most time of year</td></tr><tr><td>Usr</td><td>\u30b9\u30dd\u30fc\u30c4\u304c\u304a\u597d\u304d\u306a\u3088\u3046\u3067\u3059\u306d (It seems you like sports)</td></tr><tr><td>Sys</td><td>I love the outdoors too, do you have any hobbies?</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"text": "M-CausalBert handles extra-Sentential codeswitching context without any language identifiers.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |