|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:55:04.467521Z" |
|
}, |
|
"title": "PLATO-KAG: Unsupervised Knowledge-Grounded Conversation via Joint Modeling", |
|
"authors": [ |
|
{ |
|
"first": "Xinxian", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Baidu Inc", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Huang", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Baidu Inc", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Siqi", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Baidu Inc", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Baidu Inc", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Baidu Inc", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Baidu Inc", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Large-scale conversation models are turning to leveraging external knowledge to improve the factual accuracy in response generation. Considering the infeasibility to annotate the external knowledge for large-scale dialogue corpora, it is desirable to learn the knowledge selection and response generation in an unsupervised manner. In this paper, we propose PLATO-KAG (Knowledge-Augmented Generation), an unsupervised learning approach for end-to-end knowledge-grounded conversation modeling. For each dialogue context, the top-k relevant knowledge elements are selected and then employed in knowledgegrounded response generation. The two components of knowledge selection and response generation are optimized jointly and effectively under a balanced objective. Experimental results on two publicly available datasets validate the superiority of PLATO-KAG.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Large-scale conversation models are turning to leveraging external knowledge to improve the factual accuracy in response generation. Considering the infeasibility to annotate the external knowledge for large-scale dialogue corpora, it is desirable to learn the knowledge selection and response generation in an unsupervised manner. In this paper, we propose PLATO-KAG (Knowledge-Augmented Generation), an unsupervised learning approach for end-to-end knowledge-grounded conversation modeling. For each dialogue context, the top-k relevant knowledge elements are selected and then employed in knowledgegrounded response generation. The two components of knowledge selection and response generation are optimized jointly and effectively under a balanced objective. Experimental results on two publicly available datasets validate the superiority of PLATO-KAG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Recently, the capability of large-scale pre-trained models has been verified in open-domain dialogue generation, including Meena (Adiwardana et al., 2020) , Blender (Roller et al., 2021) , and PLATO-2 (Bao et al., 2020) . Without introducing explicit knowledge in learning process, substantive knowledge is implicitly embedded into parameters from the training corpus. However, these models are found to suffer from knowledge hallucinations (Roller et al., 2021; Marcus, 2020) , producing plausible statements with factual errors. To boost the generation accuracy, there is a trend to leverage external knowledge in addition to the parameters of large-scale pre-trained models (Guu et al., 2020; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 154, |
|
"text": "(Adiwardana et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 186, |
|
"text": "(Roller et al., 2021)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 219, |
|
"text": "(Bao et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 462, |
|
"text": "(Roller et al., 2021;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 476, |
|
"text": "Marcus, 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 677, |
|
"end": 695, |
|
"text": "(Guu et al., 2020;", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In knowledge-grounded conversation, several datasets have been collected through crowdsourcing (Dinan et al., 2019; Gopalakrishnan et al., 2019; Komeili et al., 2021) . Given that manual annotation is expensive and time-consuming, it is not feasible to annotate the corresponding knowledge for each response on a large scale. Therefore, it is desirable to develop knowledge-grounded dialogue generation models without reliance on explicit knowledge labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 115, |
|
"text": "(Dinan et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 144, |
|
"text": "Gopalakrishnan et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 166, |
|
"text": "Komeili et al., 2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Some attempts have been made to learn the unsupervised retrieval of external knowledge based on semantic similarity (Ghazvininejad et al., 2018; Dinan et al., 2019) . Whereas, there exists the oneto-many phenomenon in knowledge-grounded conversation (Kim et al., 2019) , where multiple knowledge elements can be appropriate to reply a given context. The prior top-1 knowledge selection employed by these approaches (Ghazvininejad et al., 2018; Dinan et al., 2019) has difficulties to hit the knowledge contained in the target response, deteriorating the learning of knowledge utilization. As an improvement, PostKS (Lian et al., 2019) and KnowledGPT (Zhao et al., 2020) rely on the target response to identify the grounded knowledge. However, involving the posterior knowledge selection will inevitably cause discrepancy between the training and inference stages (Zhao et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 144, |
|
"text": "(Ghazvininejad et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 164, |
|
"text": "Dinan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 268, |
|
"text": "(Kim et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 443, |
|
"text": "(Ghazvininejad et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 463, |
|
"text": "Dinan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 615, |
|
"end": 634, |
|
"text": "(Lian et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 650, |
|
"end": 669, |
|
"text": "(Zhao et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 863, |
|
"end": 882, |
|
"text": "(Zhao et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose an unsupervised approach for end-to-end knowledge-grounded conversation modeling, namely PLATO-KAG (Knowledge-Augmented Generation). As shown in Figure 1 , given each dialogue context, the top-k relevant knowledge elements are selected for the subsequent response generation. Then, the model learns to generate the target response grounded on each of the selected knowledge. The generation probability can in turn provide backpropagating signal for the precedent knowledge selection. These two components of knowledge selection and response generation are optimized jointly.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 179, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Two essential ingredients contribute to the performance of PLATO-KAG: top-k knowledge selection and balanced joint training. Firstly, in comparison to the conventional top-1 selection, top-k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
|
{ |
|
"text": "Figure 1: An overview of joint training in PLATO-KAG. For each dialogue context, top-k relevant knowledge elements are selected and employed in response generation. The generation probability can reflect the quality of the precedent knowledge selection. These two components of knowledge selection \u03b8 and response generation \u03c6 are optimized jointly in an unsupervised manner.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "There are two main components in PLATO-KAG: knowledge selection and knowledge-grounded response generation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As shown in Figure 1 , a dual encoder with shared parameters (Siamese network) (Bromley et al., 1993) is employed in knowledge selection, where the semantic representations of the dialogue context and knowledge are extracted independently. Then the relevance between the dialogue context c and each piece of knowledge z is estimated by:", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 101, |
|
"text": "(Bromley et al., 1993)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f (c, z) = (W c E(c)) T (W z E(z))", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where E(\u2022) is the encoder's output on the [CLS] token, corresponding to the input's pooled repre-sentation. W c and W z denotes the linear projection matrix for the dialogue context and knowledge, respectively. The relevance function f calculates the inner product of these two projected embeddings. For the subsequent response generation, the top-k knowledge elements with highest relevance scores are selected. The prior selection probability is further normalized as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 47, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03b8 (z|c) = exp(f (c, z)) z exp(f (c, z ))", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where z is one element from the top-k relevant knowledge. The benefits brought by the top-k knowledge selection are two-fold. First, top-k selection significantly increases the robustness of prior knowledge selection, as compared with the widely adopted top-1 knowledge selection (Dinan et al., 2019) . As mentioned before, there exists the one-to-many problem in knowledge-grounded conversation (Kim et al., 2019) . The top-k selection remarkably increases the chance to hit the knowledge and facilitates the training of generation model grounded on appropriate knowledge. Second, for the generation of one response, it is computational intractable to marginalize over the whole knowledge set. The top-k selection is an effective approximation, as most knowledge elements are not relevant with the current dialogue context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 300, |
|
"text": "(Dinan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 414, |
|
"text": "(Kim et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.1" |
|
}, |
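
{

"text": "As an illustration of Equations (1) and (2), the following is a minimal Python sketch of the top-k knowledge scoring, assuming pre-computed [CLS] embeddings and projection matrices as numpy arrays; the function name topk_prior and its arguments are hypothetical and not taken from the released code.\n\nimport numpy as np\n\ndef topk_prior(context_emb, knowledge_embs, W_c, W_z, k=8):\n    # f(c, z): inner product of the projected context and knowledge embeddings\n    scores = (W_c @ context_emb) @ (W_z @ knowledge_embs.T)\n    # keep the k most relevant knowledge elements\n    top_idx = np.argsort(-scores)[:k]\n    top_scores = scores[top_idx]\n    # normalize over the selected top-k only, as in Equation (2)\n    probs = np.exp(top_scores - top_scores.max())\n    probs /= probs.sum()\n    return top_idx, probs  # p_theta(z|c) for the selected knowledge",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Knowledge Selection",

"sec_num": "2.1"

},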
|
{ |
|
"text": "The overall probability of generating the target response is estimated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge-Grounded Response Generation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(r|c) = z p \u03b8 (z|c)p \u03c6 (r|c, z)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Knowledge-Grounded Response Generation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where the summation is running over the top-k selected knowledge elements. The second part of knowledge-grounded response generation can be further decomposed into the following form, if conditioned on one piece of knowledge:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge-Grounded Response Generation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03c6 (r|c, z) = T t p \u03c6 (r t |c, z, r <t )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Knowledge-Grounded Response Generation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where r <t = r 1 , ..., r t\u22121 . In fact, the above generation probability is dependent on the quality of knowledge selection. If the selected knowledge is coherent to the context and relevant to the target response, it is able to benefit the prediction of the target response and lead to a higher generative probability. Otherwise, it leads to a lower probability. As such, the generative probability given by Equation (4) can in turn provide learning signal for the precedent knowledge selection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge-Grounded Response Generation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In PLATO-KAG, the knowledge selection and response generation are optimized jointly. Depending on the marginalization strategy over knowledge , the objective in Equation 3can be expanded in the following two ways:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Balanced Joint Training", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "p seq (r|c) = z p \u03b8 (z|c) T t p \u03c6 (r t |c, z, r <t ) (5a) p tok (r|c) = T t z p \u03b8 (z|c)p \u03c6 (r t |c, z, r <t ) (5b)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Balanced Joint Training", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In the sequence form of Equation (5a), it relies on one knowledge element to predict the whole sequence of the target response. In the token form of Equation (5b), the generative process can rely on different knowledge elements independently for each token. With the sequence form, the selection of knowledge just weight like the generation of one response token. Given the long responses in knowledgegrounded conversation 2 , the module of knowledge selection is at a distinct disadvantage during joint optimization. With the token form, the weight of knowledge selection becomes identical as that of response generation. However, in the preliminary experiments, some of its generated responses exhibit some degree of knowledge misuse, where knowledge fragments are mixed inappropriately.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Balanced Joint Training", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The proposed method combines the merits of these two forms and introduces the following joint training objective for knowledge-grounded dialogue generation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Balanced Joint Training", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "p(r|c) \u221d z p \u03b8 (z|c) T t p \u03c6 (r t |c, z, r <t ) \u03b1 (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Balanced Joint Training", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where \u03b1 > 0 is a variable controlling the weight of knowledge selection and response generation. The sequence form is preserved for the sake of generation accuracy. It is worth noting that these two components are complementary to each other. A too small or too large value of \u03b1 can lead to biased and ineffective optimization. When \u03b1 is close to 0, the optimization focuses on knowledge selection, neglecting the signals from response generation. When \u03b1 approaches positive infinity, the optimization focuses on response generation, ignoring the effects of knowledge selection. Therefore, it is crucial to keep the balance during the joint optimization. In PLATO-KAG, \u03b1 is set to 1/T , where T is the length of target response. Through the adaptive normalization on the second term, our method successfully maintains the balance between knowledge selection and knowledge-grounded response generation. More analyses on the component weight are included in the experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Balanced Joint Training", |
|
"sec_num": "2.3" |
|
}, |
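
{

"text": "The following is a minimal sketch of the balanced objective in Equation (6) with \u03b1 = 1/T, computing the negative log-likelihood in log space; the tensor names log_p_select (log p_\u03b8(z|c) over the top-k knowledge) and token_log_probs (per-token log p_\u03c6(r_t|c, z, r_<t) of the target response) are illustrative assumptions rather than the released implementation.\n\nimport torch\n\ndef balanced_joint_loss(log_p_select, token_log_probs):\n    # log_p_select: [k], token_log_probs: [k, T]\n    T = token_log_probs.size(1)\n    seq_log_prob = token_log_probs.sum(dim=1)  # log of prod_t p_phi(r_t|c, z, r_<t)\n    alpha = 1.0 / T  # balances knowledge selection and response generation\n    # log of sum_z p_theta(z|c) * p_phi(r|c, z)^alpha, computed stably in log space\n    log_marginal = torch.logsumexp(log_p_select + alpha * seq_log_prob, dim=0)\n    return -log_marginal  # minimize the negative log-likelihood",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Balanced Joint Training",

"sec_num": "2.3"

},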
|
{ |
|
"text": "3.1 Settings", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We conducted experiments on two knowledgegrounded conversation datasets: Wizard of Wikipedia (WoW) (Dinan et al., 2019) and Holl-E (Moghe et al., 2018) . In Wizard of Wikipedia, two participants conduct in-depth discussion on a chosen beginning topic. One of the participants has access to relevant knowledge and plays the role of an expert (wizard). The other one acts as a curious learner (apprentice). There are 18,430/1,948/1,933 dialogues in the training/validation/test set. Validation and test sets are further split into seen and unseen parts, where the latter one is about new topics outside the training set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 119, |
|
"text": "(Dinan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 151, |
|
"text": "(Moghe et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "In Holl-E, a single document about a specific movie is given as external knowledge for two participants to discuss in the conversation. There are 7,228/930/913 dialogues in the training/validation/test set. To facilitate the evaluation, the test set includes multiple reference responses for each dialogue context. We use the scripts provided by Kim et al. (2019) to process this dataset. 3", |
|
"cite_spans": [ |
|
{ |
|
"start": 346, |
|
"end": 363, |
|
"text": "Kim et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "As these two datasets have annotated the ground truth knowledge used by participants to ground their conversation responses, both components of knowledge selection and knowledge-grounded response generation can be evaluated thoroughly in the experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "We compared the proposed method with the following approaches. Transformer Memory Network (TMN) is a classical knowledge-grounded dialogue generation method (Dinan et al., 2019) . Its training can be carried out in a supervised or unsupervised way, depending on whether the ground truth knowledge label is involved or not. In our experiments, we also included the supervised TMN as the performance upper bound of unsupervised models for reference. PostKS is an unsupervised approach, which employs the target response to estimate the posterior distribution over knowledge (Lian et al., 2019) . During training, the KL divergence is employed to reduce the gap between prior and posterior distributions. During inference, it will rely on the prior distribution to select knowledge for response generation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 177, |
|
"text": "(Dinan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 591, |
|
"text": "(Lian et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "KnowledGPT employs a cross encoder for knowledge selection (Zhao et al., 2020) . It constructs pseudo knowledge labels based on word overlaps and uses them as weak supervision signals to warm up the models. The knowledge selection is then optimized using reinforcement learning with the rewards from generated responses. The response generation is learned gradually conditioned on knowledge selected from pseudo label to the prior distribution. They are optimized iteratively under their corresponding training objectives.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 78, |
|
"text": "(Zhao et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "We initialized the model parameters of knowledge selection and response generation with pre-trained dialogue generation models (Bao et al., 2020) . There are 24 transformer blocks and 16 attention heads, with the embedding dimension of 1024. The maximum sequence length of context, knowledge and response is set to 256, 128 and 128, respectively. We used Adam optimizer (Kingma and Ba, 2015) with a learning rate of 2e \u2212 5 and a batch size of 64. The number of relevant knowledge elements (top-k) was set to 8 during training. Detailed explorations of top-k settings on the validation sets are included in the Appendix. The training process was carried out on 8 Nvidia Tesla V100 32G GPU cards. Following the convention in knowledge-grounded conversation, only the most relevant knowledge was selected for response generation during inference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 145, |
|
"text": "(Bao et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "Since the original TMN and PostKS are developed on shallow networks, for the sake of fair comparison, we re-implemented them and initialized the model parameters in the way as the proposed method. For KnowledGPT, we used its opensourced checkpoint 4 in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "In the automatic evaluation, Perplexity (PPL) and Unigram F1 of ground truth responses (Dinan et al., 2019) are adopted to assess the response quality. Recall@1 (top-1 knowledge accuracy) is used to evaluate the performance of knowledge selection. We used the evaluation scripts provided by Dinan et al. (2019) . 5 In the human evaluation, we randomly sampled 100 examples from WoW seen and unseen test set, respectively. Each sample was distributed to three annotators and evaluated on the four aspects:", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 107, |
|
"text": "(Dinan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 310, |
|
"text": "Dinan et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 314, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 Coherence evaluates whether the response is consistent and relevant with the context. \u2022 Informativeness assesses whether the response contains appropriate information. \u2022 Engagingness measures the annotator's willingness to discuss with the speaker for a long conversation. \u2022 Hallucination estimates the factual correctness in the response. Coherence, informativeness and engagingness are scored on a range of [0, 1, 2], with the higher value, the better. Hallucination is evaluated on a range of [0, 1], where 0 means the response is factually correct and 1 means the response contains factual errors. The scoring criteria are provided in the Appendix. The final score of each sample was determined through majority voting. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "The evaluation results on the WoW test sets are summarized in Table 1 . Besides the unsupervised models, the supervised TMN with reliance on knowledge labels during training was also included in the experiments for reference. The automatic and human evaluation results demonstrate that PLATO-KAG achieves better performance as compared with other state-of-the-art unsupervised approaches, even on par with the supervised approach. Based on appropriate knowledge selection, PLATO-KAG produces high-quality responses that are coherent, informative and engaging. Moreover, it alleviates the problem of knowledge hallucinations and generates more factual accurate responses. As shown in the Table 1 , unsupervised TMN generates less informative responses and suffers from a higher degree of hallucination. As for PostKS, based on inferior prior knowledge selection, it generates less coherent responses. Since KnowledGPT employs a cross encoder in the knowledge selection, it achieves a higher value of Recall@1. While cross encoder is hardly feasible for practical deployment given its expensive computation cost. Another factor that attributes to the weak performance of KnowledGPT might be the pre-training models used for initialization 6 . The average Fleiss's kappa (Fleiss, 1971) in human evaluation is 0.502, indicating that annotators have reached moderate agreement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1237, |
|
"end": 1238, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1268, |
|
"end": 1282, |
|
"text": "(Fleiss, 1971)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 69, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 687, |
|
"end": 694, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The evaluation results on the Holl-E test set are summarized in Table 2 . In the evaluation on the multiple reference test set, we took the best score over multiple reference responses for each dialogue context. The results demonstrate that PLATO-KAG also achieves competitive results in Holl-E. PostKS obtains a slightly higher value on Unigram F1 than PLATO-KAG and supervised TMN. While the values on Distinct-1/2 (Li et al., 2016) indicate the PLATO-KAG and supervised TMN might have better capacity on lexical diversity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 417, |
|
"end": 434, |
|
"text": "(Li et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 71, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For further qualitative analysis, two examples of generated responses from the WoW test set are provided in Table 3 . It can be observed that unsupervised TMN suffers from low-quality response generation, such as generic replies with little information or statements with factual errors. In comparison, PostKS and KnowledGPT are able to generate much more informative responses, depicting contents from the selected knowledge. However, the responses fail to be coherent with the dialogue context due to the inferior knowledge selection. Among these unsupervised approaches, PLATO-KAG achieves better performance, producing coherent and informative responses.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 115, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Analysis", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "The above analysis is also validated by the re- sults on knowledge dependency. Knowledge F1 (Lian et al., 2019) , which is defined as the unigram F1 between the generated response and the selected knowledge, can measure the degree of knowledge dependency. A too low Knowledge F1 value means the models hardly reference the knowledge when generating responses. A too high value indicates the models \"copy\" the knowledge too much, which might cause unnatural responses. is involved in their training process, their generation models learn to rely heavily on the provided knowledge, resulting in very high Knowledge F1 values. During inference with their inferior prior knowledge selection, this kind of strong dependency will lead to unrelated and unnatural response generation. Our method gets exempt from this discrepancy with end-to-end modeling and optimization. The close values of PLATO-KAG and the ground truth (0.347/0.334 on seen and 0.340/0.335 on unseen) indicates our method achieves a natural degree of knowledge utilization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 111, |
|
"text": "(Lian et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Analysis", |
|
"sec_num": "3.3.1" |
|
}, |
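
{

"text": "For reference, Knowledge F1 can be computed as the unigram F1 between a generated response and the selected knowledge. The sketch below assumes simple whitespace tokenization and lowercasing; the exact normalization used in the paper's evaluation may differ.\n\nfrom collections import Counter\n\ndef knowledge_f1(response, knowledge):\n    resp_tokens = response.lower().split()\n    know_tokens = knowledge.lower().split()\n    common = Counter(resp_tokens) & Counter(know_tokens)  # overlapping unigram counts\n    num_same = sum(common.values())\n    if num_same == 0:\n        return 0.0\n    precision = num_same / len(resp_tokens)\n    recall = num_same / len(know_tokens)\n    return 2 * precision * recall / (precision + recall)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Case Analysis",

"sec_num": "3.3.1"

},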
|
{ |
|
"text": "As discussed in the introduction, conversation models are turning to leveraging external knowledge explicitly to boost generation accuracy. To quantitatively analyze the performance, one dialogue generation model was trained on the WoW dataset without grounding on external knowledge, denoted as PLATO-KAG w/o EK. We asked annotators to compare the hallucination and informativeness between our method and PLATO-KAG w/o EK, with results summarized in Table 5 . It is notable that the tie score of hallucination from PLATO-KAG w/o EK is a little inflated. This is because the model generates less informative responses, which helps keep the factual correctness (less talk, less mistake). With access to external knowledge, our method achieves better performance consistently. Moreover, the performance gaps on both metrics get enlarged from the seen to unseen test set. PLATO-KAG w/o EK produces plausible statements with factual errors more easily under unseen topics. Two examples of generated responses by these two models are shown in Table 6 , where the contents with factual errors are displayed in italic blocks. It reveals that PLATO-KAG w/o EK has difficulties to memorize and describe the knowledge details precisely. In fact, the initial publication of Harry Potter is in 1997 and Hermione Granger is one representative character in the book instead of a publisher. Sometimes, PLATO-KAG w/o EK produces statements that are obviously problematic and against the common sense, like \"a rectangular ball\". By leveraging external knowledge, PLATO-KAG can generate more accurate and informative responses. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 458, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 1038, |
|
"end": 1045, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "External Knowledge Effects on Response Quality", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "As discussed in Section 2.3, the quality of joint optimization is effected by the marginalization strategies and component weight. Explorations on these settings have been carried out on the validation sets, with the perplexity results summarized in Table 7 . For the marginalization strategy, the token form (Equation (5b)), which depends on various knowledge elements to predict one response token, obtains relatively poor results. Under this training paradigm, the model tends to mix information from various knowledge fragments and is prone to generate low-quality responses. Two more examples are included in the Appendix to illustrate this phenomenon.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 257, |
|
"text": "Table 7", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impacts of Marginalization Strategies and Component Weight", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "As comparison, with the marginalization strategy in sequence form (Equation (6)), the models achieve relatively better performance on perplexity. For the sequence form, one crucial factor affecting the performance is the component weight \u03b1 between knowledge selection and knowledgegrounded response generation. Under the straightforward setting (\u03b1 = 1), knowledge selection weighs like one single response token. In PLATO-KAG (\u03b1 = 1/T , where T is the length of target response), the weight of knowledge selection becomes identical to that of the whole response generation. The results indicate PLATO-KAG achieves better performance with the help of balanced training. A too large or too small weight value (such as \u03b1 = T or \u03b1 = 1/T 2 ) will lead to ineffective optimization and performance degradation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impacts of Marginalization Strategies and Component Weight", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "Knowledge-grounded conversation is becoming a more important and popular topic, with several datasets (Zhang et al., 2018; Moghe et al., 2018; Zhou et al., 2018; Dinan et al., 2019; Gopalakrishnan et al., 2019; Komeili et al., 2021) collected to study it. Besides interactive dialogues, some of these datasets have annotated the corresponding knowledge for each response, aiming to ease the learning difficulty of knowledge-grounded conversation. However, given that manual annotation is expensive and time-consuming, it is not feasible to carry out the knowledge labelling on a large scale.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 122, |
|
"text": "(Zhang et al., 2018;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 142, |
|
"text": "Moghe et al., 2018;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 161, |
|
"text": "Zhou et al., 2018;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 181, |
|
"text": "Dinan et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 210, |
|
"text": "Gopalakrishnan et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 232, |
|
"text": "Komeili et al., 2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Unsupervised approaches have been introduced to model knowledge-grounded conversation. Some of these such as Li et al. (2019) ; Yavuz et al. (2019) ; Lin et al. (2020) perform implicit soft fusion over provided knowledge elements and do not select knowledge explicitly. Some attempts have been made to learn the unsupervised selection of external knowledge based on semantic similarity (Ghazvininejad et al., 2018; Dinan et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 125, |
|
"text": "Li et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 128, |
|
"end": 147, |
|
"text": "Yavuz et al. (2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 167, |
|
"text": "Lin et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 414, |
|
"text": "(Ghazvininejad et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 434, |
|
"text": "Dinan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Due to the one-to-many problem in knowledgegrounded conversation (Kim et al., 2019) , the prior top-1 knowledge selection employed by these approaches has difficulties to hit the knowledge contained in the target response, and deteriorates the learning of knowledge utilization. Our top-k selection improves the robustness of prior knowledge selection. Some other works (Lian et al., 2019; Zhao et al., 2020; Ren et al., 2020) employ the target response to identify the grounded knowledge. Since the posterior knowledge selection is involved, it will inevitably cause discrepancy between the training and inference stages (Zhao et al., 2019) . With end-to-end modeling and optimization, PLATO-KAG gets exempt from this discrepancy. KIF (Fan et al., 2021) explicitly selects external knowledge through a retrieval module, and fuses into one integrated representation to assist dialogue generation. While some knowledge details might be obscured with this fusion. As comparison, the knowledge keeps its independence and integrity in our response generation, which helps reduce the hallucination.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 83, |
|
"text": "(Kim et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 389, |
|
"text": "(Lian et al., 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 408, |
|
"text": "Zhao et al., 2020;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 426, |
|
"text": "Ren et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 622, |
|
"end": 641, |
|
"text": "(Zhao et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 754, |
|
"text": "(Fan et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "More recently, attempts to utilize the pre-trained retriever DPR . DPR has been trained on Wikipedia which includes the knowledge sets of WoW and Holl-E. Due to the concern of potential data contamination, we choosed to initialize our knowledge selection module with a general dialogue model which is pre-trained on Reddit. Thus, we facilitated an unbiased setting for our experiments and the analysis of framework generalization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this paper, an unsupervised approach is proposed for end-to-end knowledge grounded conversation modeling. There are two main components in our method: knowledge selection and response generation. Given a dialogue context, top-k relevant knowledge elements are selected and utilized for response generation. The generation probability can in turn provide training signal for the precedent knowledge selection. Joint balanced training is further introduced for the effective optimization of these two components. Comprehensive experiments have been carried out on WoW and Holl-E, verifying the effectiveness and superiority of the proposed method. To decide the proper number of relevant knowledge elements (top-k) for the training process, we conducted 3 runs of experiments for each top-k setting (k = 1, 2, 4, 8, 16). The median results on the validation sets are reported in Table 10 . As discussed in the introduction, the prior top-1 knowledge selection hardly hits the grounded knowledge and suffers from relatively poor results. It also reveals a trend that models with larger k values can achieve better performance. It reaches stable states around k = 8. To balance the efficiency and performance, we set k = 8 in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 880, |
|
"end": 888, |
|
"text": "Table 10", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our training code and models will be released at https://github.com/PaddlePaddle/Knover/ tree/develop/projects/PLATO-KAG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For example, the dialogue response has 18.431 words on average in the Wizard of Wikipedia dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/bckim92/ sequential-knowledge-transformer/blob/ master/data/holle.py", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/zhaoxlpku/ KnowledGPT 5 https://github.com/facebookresearch/ ParlAI", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The released checkpoint of KnowledGPT is developed on the general language model GPT-2, while the rest models are developed on dialogue pre-training models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the anonymous reviewers for their constructive suggestions and Yunyi Yang for the helpful discussions. This work was supported by the Natural Key Research and Development Project of China (No. 2018AAA0101900).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The detailed criteria used in human evaluation are provided in Table 8 . To evaluate the criteria of hallucination, the human annotators were provided with referenced knowledge and allowed to use search engine to check the factual correctness.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 70, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Human Evaluation Scoring Criteria", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Coherence 0\u2022 The response is irrelated with the context.\u2022 The response simply repeats the context.\u2022 The response has obvious conflicts with the context.\u2022 There are serious logic conflicts within the response.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Score", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 The response is less coherent with the context.\u2022 There are minor logic conflicts within the response.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 The response is consistent and relevant with the context.Score Informativeness 0\u2022 The response contains no information.\u2022 The response simply repeats the context and contains no additional information.\u2022 Since coherence score is 0, the information is invalid.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 The response contains a little additional information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 The response has appropriate information.Score Engagingness 0\u2022 I am not willing to talk with the speaker.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 It is a bit boring, but still ok to go on talking with this speaker.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 I would like to talk with the speaker for a long conversation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 The response is factually correct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Some details in the response are factually incorrect. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our preliminary experiments, the model trained with token form marginalization strategy exhibits a certain degree of knowledge misuse. Some examples are provided in Table 9 . In the upper case, the fragments from the reference knowledge \"in his spare time\" and \"selling pharmaceuticals\" are mixed to \"selling his spare time to a pharmacist\" in the generated response. The bottom case also exhibits a low level of fidelity to the selected knowledge. One hypothesis to this phenomenon is that under the objective of token form (Equation (5b)), the model learns to blend information across various knowledge fragments to predict one response token. Under this training paradigm, the learned model is prone to hallucinate knowledge, mixing knowledge fragments inappropriately.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 175, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Knowledge Hallucination with Token Form Marginalization Strategy", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "[Topic] john grisham [Apprentice] did you know john grisham is a popular author?[Wizard] yes, his first bestseller \"the firm\" sold more than 7 million copies! \u2026 [Apprentice] do you know any other movies that were made into movies from his books?Referenced Knowledge eight of his other novels have also been adapted into films: \"the chamber\", \"the client\", \"a painted house\", \"the pelican brief\", \"the rainmaker\", \"the runaway jury\", \"skipping christmas\", and \"a time to kill\". Generated Response yes, \"the runaway jury\" has also been adapted into other novels \"the client of christmas\", \"children of men\" and \"a time to kill\" ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Towards a human-like opendomain chatbot", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Adiwardana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Fiedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romal", |
|
"middle": [], |
|
"last": "Thoppilan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apoorv", |
|
"middle": [], |
|
"last": "Kulshreshtha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Nemade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifeng", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.09977" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Adiwardana, Minh-Thang Luong, David R So, Jamie Hall, Noah Fiedel, Romal Thoppilan, Zi Yang, Apoorv Kulshreshtha, Gaurav Nemade, Yifeng Lu, and Quoc V. Le. 2020. Towards a human-like open- domain chatbot. arXiv preprint arXiv:2001.09977.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Plato-2: Towards building an open-domain chatbot via curriculum learning", |
|
"authors": [ |
|
{ |
|
"first": "Siqi", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huang", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenquan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhibin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinchao", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.16779" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siqi Bao, Huang He, Fan Wang, Hua Wu, Haifeng Wang, Wenquan Wu, Zhen Guo, Zhibin Liu, and Xinchao Xu. 2020. Plato-2: Towards building an open-domain chatbot via curriculum learning. arXiv preprint arXiv:2006.16779.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Signature verification using a\" siamese\" time delay neural network", |
|
"authors": [ |
|
{ |
|
"first": "Jane", |
|
"middle": [], |
|
"last": "Bromley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Guyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "S\u00e4ckinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roopak", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "737--744", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jane Bromley, Isabelle Guyon, Yann LeCun, Eduard S\u00e4ckinger, and Roopak Shah. 1993. Signature veri- fication using a\" siamese\" time delay neural network. Advances in neural information processing systems, 6:737-744.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Wizard of wikipedia: Knowledge-powered conversational agents. International Conference on Learning Representations", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2019. Wizard of wikipedia: Knowledge-powered conversational agents. International Conference on Learning Rep- resentations.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Augmenting transformers with knn-based composite memory for dialog", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "82--99", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Claire Gardent, Chlo\u00e9 Braud, and An- toine Bordes. 2021. Augmenting transformers with knn-based composite memory for dialog. Transac- tions of the Association for Computational Linguis- tics, 9:82-99.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Measuring nominal scale agreement among many raters", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Joseph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fleiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1971, |
|
"venue": "Psychological bulletin", |
|
"volume": "76", |
|
"issue": "5", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph L Fleiss. 1971. Measuring nominal scale agree- ment among many raters. Psychological bulletin, 76(5):378.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A knowledge-grounded neural conversation model", |
|
"authors": [ |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marjan Ghazvininejad, Chris Brockett, Ming-Wei Chang, Bill Dolan, Jianfeng Gao, Wen-tau Yih, and Michel Galley. 2018. A knowledge-grounded neu- ral conversation model. In Proceedings of the AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Topical-chat: Towards knowledge-grounded open-domain conversations", |
|
"authors": [ |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Gopalakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinglang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Gottardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Kwatra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anu", |
|
"middle": [], |
|
"last": "Venkatesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raefer", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-T\u00fcr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1891--1895", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karthik Gopalakrishnan, Behnam Hedayatnia, Qinglang Chen, Anna Gottardi, Sanjeev Kwatra, Anu Venkatesh, Raefer Gabriel, Dilek Hakkani-T\u00fcr, and Amazon Alexa AI. 2019. Topical-chat: Towards knowledge-grounded open-domain conversations. In Interspeech, pages 1891-1895.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Realm: Retrievalaugmented language model pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Guu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zora", |
|
"middle": [], |
|
"last": "Tung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panupong", |
|
"middle": [], |
|
"last": "Pasupat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 37th Annual International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3929--3938", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasu- pat, and Ming-Wei Chang. 2020. Realm: Retrieval- augmented language model pre-training. In Pro- ceedings of the 37th Annual International Confer- ence on Machine Learning, pages 3929-3938.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Dense passage retrieval for open-domain question answering", |
|
"authors": [ |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Karpukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barlas", |
|
"middle": [], |
|
"last": "Oguz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sewon", |
|
"middle": [], |
|
"last": "Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ledell", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6769--6781", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. In Proceedings of the 2020 Conference on Empirical Methods in Nat- ural Language Processing (EMNLP), pages 6769- 6781.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Sequential latent knowledge selection for knowledge-grounded dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Byeongchang", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gunhee", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Byeongchang Kim, Jaewoo Ahn, and Gunhee Kim. 2019. Sequential latent knowledge selection for knowledge-grounded dialogue. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Retrieval-augmented generation for knowledge-intensive nlp tasks", |
|
"authors": [ |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ethan", |
|
"middle": [], |
|
"last": "Perez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aleksandara", |
|
"middle": [], |
|
"last": "Piktus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Petroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Karpukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heinrich", |
|
"middle": [], |
|
"last": "K\u00fcttler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rockt\u00e4schel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9459--9474", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Hein- rich K\u00fcttler, Mike Lewis, Wen-tau Yih, Tim Rock- t\u00e4schel, et al. 2020. Retrieval-augmented genera- tion for knowledge-intensive nlp tasks. In Advances in Neural Information Processing Systems, pages 9459-9474.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A diversity-promoting objective function for neural conversation models", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William B", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "110--119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and William B Dolan. 2016. A diversity-promoting objective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 110-119.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Incremental transformer with deliberation decoder for document grounded conversations", |
|
"authors": [ |
|
{ |
|
"first": "Zekang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cheng", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fandong", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--21", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zekang Li, Cheng Niu, Fandong Meng, Yang Feng, Qian Li, and Jie Zhou. 2019. Incremental trans- former with deliberation decoder for document grounded conversations. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 12-21.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Learning to select knowledge for response generation in dialog systems", |
|
"authors": [ |
|
{ |
|
"first": "Rongzhong", |
|
"middle": [], |
|
"last": "Lian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinhua", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 28th International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5081--5087", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rongzhong Lian, Min Xie, Fan Wang, Jinhua Peng, and Hua Wu. 2019. Learning to select knowledge for response generation in dialog systems. In Pro- ceedings of the 28th International Joint Conference on Artificial Intelligence, pages 5081-5087.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Generating informative conversational response using recurrent knowledgeinteraction and knowledge-copy", |
|
"authors": [ |
|
{ |
|
"first": "Xiexiong", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiyu", |
|
"middle": [], |
|
"last": "Jian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianshan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiexiong Lin, Weiyu Jian, Jianshan He, Taifeng Wang, and Wei Chu. 2020. Generating informative conversational response using recurrent knowledge- interaction and knowledge-copy. In Proceedings of the 58th Annual Meeting of the Association for Com- putational Linguistics, pages 41-52.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The next decade in ai: four steps towards robust artificial intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Gary", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.06177" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gary Marcus. 2020. The next decade in ai: four steps towards robust artificial intelligence. arXiv preprint arXiv:2002.06177.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Towards exploiting background knowledge for building conversation systems", |
|
"authors": [ |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Moghe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddhartha", |
|
"middle": [], |
|
"last": "Arora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suman", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitesh M", |
|
"middle": [], |
|
"last": "Khapra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2322--2332", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikita Moghe, Siddhartha Arora, Suman Banerjee, and Mitesh M Khapra. 2018. Towards exploiting back- ground knowledge for building conversation sys- tems. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2322-2332.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Thinking globally, acting locally: Distantly supervised global-to-local knowledge selection for background based conversation", |
|
"authors": [ |
|
{ |
|
"first": "Pengjie", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhumin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "De Rijke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "8697--8704", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pengjie Ren, Zhumin Chen, Christof Monz, Jun Ma, and Maarten de Rijke. 2020. Thinking globally, acting locally: Distantly supervised global-to-local knowledge selection for background based conver- sation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8697-8704.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Recipes for building an open-domain chatbot", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Da", |
|
"middle": [], |
|
"last": "Ju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [], |
|
"last": "Williamson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 16th Conference of the European Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "300--325", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M Smith, et al. 2021. Recipes for building an open-domain chatbot. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics, page 300-325.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Retrieval augmentation reduces hallucination in conversation", |
|
"authors": [ |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spencer", |
|
"middle": [], |
|
"last": "Poff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Moya", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2104.07567" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kurt Shuster, Spencer Poff, Moya Chen, Douwe Kiela, and Jason Weston. 2021. Retrieval augmentation re- duces hallucination in conversation. arXiv preprint arXiv:2104.07567.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Deepcopy: Grounded response generation with hierarchical pointer networks", |
|
"authors": [ |
|
{ |
|
"first": "Semih", |
|
"middle": [], |
|
"last": "Yavuz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Rastogi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guan-Lin", |
|
"middle": [], |
|
"last": "Chao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "122--132", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Semih Yavuz, Abhinav Rastogi, Guan-Lin Chao, and Dilek Hakkani-Tur. 2019. Deepcopy: Grounded response generation with hierarchical pointer net- works. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue, pages 122- 132.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Personalizing dialogue agents: I have a dog, do you have pets too?", |
|
"authors": [ |
|
{ |
|
"first": "Saizheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Urbanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Szlam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2204--2213", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Per- sonalizing dialogue agents: I have a dog, do you have pets too? In Proceedings of the 56th Annual Meeting of the Association for Computational Lin- guistics, pages 2204-2213.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Rethinking action spaces for reinforcement learning in end-to-end dialog agents with latent variable models", |
|
"authors": [ |
|
{ |
|
"first": "Tiancheng", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaige", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxine", |
|
"middle": [], |
|
"last": "Eskenazi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1208--1218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tiancheng Zhao, Kaige Xie, and Maxine Eskenazi. 2019. Rethinking action spaces for reinforcement learning in end-to-end dialog agents with latent vari- able models. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1208-1218.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Knowledgegrounded dialogue generation with pre-trained language models", |
|
"authors": [ |
|
{ |
|
"first": "Xueliang", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Can", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chongyang", |
|
"middle": [], |
|
"last": "Tao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3377--3390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xueliang Zhao, Wei Wu, Can Xu, Chongyang Tao, Dongyan Zhao, and Rui Yan. 2020. Knowledge- grounded dialogue generation with pre-trained lan- guage models. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing, pages 3377-3390.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A dataset for document grounded conversations", |
|
"authors": [ |
|
{ |
|
"first": "Kangyan", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrimai", |
|
"middle": [], |
|
"last": "Prabhumoye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Black", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "708--713", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kangyan Zhou, Shrimai Prabhumoye, and Alan W Black. 2018. A dataset for document grounded con- versations. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 708-713.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF2": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Automatic evaluation results on the Holl-E test set. leading figure of the new popular rock group, the new sound of rock and roll. he was very popular.", |
|
"content": "<table><tr><td/><td>[Topic] zumba</td></tr><tr><td/><td>[Wizard] hello dear, have you hard of zumba? is an exercise fitness program created by colombian dancer</td></tr><tr><td>Context</td><td>[Apprentice] i have! my best friend and i occasionally take classes!</td></tr><tr><td/><td>[Wizard] do you know that zumba is a trademark owned by zumba fitness, .</td></tr><tr><td/><td>[Apprentice] i didn't know that. it's interesting because there seem to be so many variations. do you like it?</td></tr><tr><td>Unsupervised TMN</td><td>yes i have tried it and i like it a lot.</td></tr><tr><td>PostKS</td><td>approximately 15 million people take weekly zumba classes in over 200,000 locations across 180 countries.</td></tr><tr><td>KnowledGPT</td><td>its an exercise fitness program created by colombian dancer and cyclist/choreographer alberto \"beto\" \"power pedal\"</td></tr><tr><td>PLATO-KAG</td><td>yes. it incorporates hip hop, soca, samba, merengue and mambo.</td></tr><tr><td>Supervised TMN</td><td>yes i do. it includes hip-hop and samba, socal, salsa, choreography, mambo.</td></tr><tr><td>Ground Truth</td><td>yes, zumba fitness ambassador is abrazilian pop singer claudia leitte</td></tr><tr><td/><td>[Topic] elvis presley</td></tr><tr><td/><td>[Wizard] do you like elvis presley? he was a famous american singer</td></tr><tr><td>Context</td><td>[Apprentice] he is pretty good, i am from nashville, so i've even been to graceland.</td></tr><tr><td/><td>[Wizard] presley was born in tupelo, mississippi, and then to memphis</td></tr><tr><td/><td>[Apprentice] what was elvis's most popular songs?</td></tr><tr><td>Unsupervised TMN</td><td>his first big hit was \"favorite song\"</td></tr><tr><td>PostKS</td><td>he had a series of successful netflix appearances and chart-topping records</td></tr><tr><td colspan=\"2\">KnowledGPT he became the PLATO-KAG his first rca single was \"heartbreak hotel\" and became a number one hit in the united states</td></tr><tr><td>Supervised TMN</td><td>his first rca single, \"heartbreak hotel\" was released in 1956 and became a number one hit in the us</td></tr><tr><td>Ground Truth</td><td>heartbreak hotel was a number one hit in the usa</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Examples of the generated responses on the WoW seen (upper) and unseen (bottom) test sets.", |
|
"content": "<table><tr><td/><td>WoW Seen Test</td><td>WoW Unseen Test</td></tr><tr><td>Unsupervised TMN</td><td>0.107</td><td>0.106</td></tr><tr><td>PostKS</td><td>0.443</td><td>0.430</td></tr><tr><td>KnowledGPT</td><td>0.385</td><td>0.375</td></tr><tr><td>PLATO-KAG</td><td>0.347</td><td>0.340</td></tr><tr><td>Supervised TMN</td><td>0.314</td><td>0.306</td></tr><tr><td>Ground Truth</td><td>0.334</td><td>0.335</td></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Knowledge F1 on the WoW test set.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF8": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Apprentice] everyone has seen harry potter except me. have you ever read the books or watched the movies? [Wizard] yes i am a big fan of it actually. i love reading about chronicles of the life of a young wizard [Apprentice] was he born a wizard?[Wizard] as far as the main story arc concerns, he was and he went to wizard school to improve himself.[Apprentice] who wrote these stories? PLATO-KAG w/o EK it was written by j.k. rowling. and it was published in 1977 by hermione granger.", |
|
"content": "<table><tr><td>: Comparison of hallucination and informative-</td></tr><tr><td>ness between PLATO-KAG and PLATO-KAG w/o EK</td></tr><tr><td>on the WoW test sets.</td></tr><tr><td>Knowledge F1 are reported in Table 4. The re-</td></tr><tr><td>sults indicate that unsupervised TMN suffers from</td></tr><tr><td>poor knowledge utilization. As for PostKS and</td></tr><tr><td>KnowledGPT, since posterior knowledge selection</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Examples of PLATO-KAG and PLATO-KAG w/o EK on the WoW seen and unseen test sets. Italic blocks are contents with factually errors.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF11": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td>: Perplexity under different marginalization</td></tr><tr><td>strategies and component weights on the WoW and</td></tr><tr><td>Holl-E validation sets.</td></tr></table>" |
|
}, |
|
"TABREF12": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Examples of knowledge misuse under token form marginalization strategy on the WoW seen and unseen test sets. Referenced knowledge is the model selected to ground response generation. Italic blocks are contents with factually errors.", |
|
"content": "<table><tr><td colspan=\"6\">C Explorations of Top-k Settings in</td><td/></tr><tr><td colspan=\"4\">Knowledge Selection</td><td/><td/><td/></tr><tr><td/><td colspan=\"2\">WoW Seen</td><td colspan=\"2\">WoW Unseen</td><td colspan=\"2\">Holl-E</td></tr><tr><td>Top-k</td><td>PPL</td><td>Recall @1</td><td>PPL</td><td>Recall @1</td><td>PPL</td><td>Recall @1</td></tr><tr><td>1</td><td>10.583</td><td>0.064</td><td>12.842</td><td>0.061</td><td>16.720</td><td>0.031</td></tr><tr><td>2</td><td>9.897</td><td>0.250</td><td>11.344</td><td>0.228</td><td>10.634</td><td>0.251</td></tr><tr><td>4</td><td>9.865</td><td>0.258</td><td>11.339</td><td>0.228</td><td>10.359</td><td>0.262</td></tr><tr><td>8</td><td>9.863</td><td>0.257</td><td>11.325</td><td>0.231</td><td>10.246</td><td>0.266</td></tr><tr><td>16</td><td>9.871</td><td>0.256</td><td>11.321</td><td>0.231</td><td>10.309</td><td>0.263</td></tr></table>" |
|
}, |
|
"TABREF13": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Perplexity and Recall@1 under different topk settings on the WoW and Holl-E validation sets.", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |