|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:47:41.658848Z" |
|
}, |
|
"title": "Best Practices for Data-Efficient Modeling in NLG: How to Train Production-Ready Neural Models with Less Data", |
|
"authors": [ |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Arun", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Soumya", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Vikas", |
|
"middle": [], |
|
"last": "Bhardwaj", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ashwini", |
|
"middle": [], |
|
"last": "Challa", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Pinar", |
|
"middle": [], |
|
"last": "Donmez", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Peyman", |
|
"middle": [], |
|
"last": "Heidari", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hakan", |
|
"middle": [], |
|
"last": "Inan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Shashank", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Anuj", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Shawn", |
|
"middle": [], |
|
"last": "Mei", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Mohan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Natural language generation (NLG) is a critical component in conversational systems, owing to its role of formulating a correct and natural text response. Traditionally, NLG components have been deployed using template-based solutions. Although neural network solutions recently developed in the research community have been shown to provide several benefits, deployment of such model-based solutions has been challenging due to high latency, correctness issues, and high data needs. In this paper, we present approaches that have helped us deploy data-efficient neural solutions for NLG in conversational systems to production. We describe a family of sampling and modeling techniques to attain production quality with lightweight neural network models using only a fraction of the data that would be necessary otherwise, and show a thorough comparison between each. Our results show that domain complexity dictates the appropriate approach to achieve high data efficiency. Finally, we distill the lessons from our experimental findings into a list of best practices for production-level NLG model development, and present them in a brief runbook. Importantly, the end products of all of the techniques are small sequence-to-sequence models (~2Mb) that we can reliably deploy in production. * Author list alphabetical by last name.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Natural language generation (NLG) is a critical component in conversational systems, owing to its role of formulating a correct and natural text response. Traditionally, NLG components have been deployed using template-based solutions. Although neural network solutions recently developed in the research community have been shown to provide several benefits, deployment of such model-based solutions has been challenging due to high latency, correctness issues, and high data needs. In this paper, we present approaches that have helped us deploy data-efficient neural solutions for NLG in conversational systems to production. We describe a family of sampling and modeling techniques to attain production quality with lightweight neural network models using only a fraction of the data that would be necessary otherwise, and show a thorough comparison between each. Our results show that domain complexity dictates the appropriate approach to achieve high data efficiency. Finally, we distill the lessons from our experimental findings into a list of best practices for production-level NLG model development, and present them in a brief runbook. Importantly, the end products of all of the techniques are small sequence-to-sequence models (~2Mb) that we can reliably deploy in production. * Author list alphabetical by last name.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Task-oriented dialog systems are commonplace in automated systems that interact with end users, including digital assistants, technical support agents, and various website navigation helpers. An essential part in any task-oriented dialog system is natural language generation (NLG), which consumes data, typically fed in the form of a dialog act, and converts it into natural language output to be served to the end user. The natural language response of the NLG component should 1) contain all essential information, 2) be contextualized around the user request, and 3) be natural sounding. Such a system requires consideration for content planning, correctness, grammaticality, and naturalness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "NLG systems employed in commercial settings are typically based on template-based text generation techniques (Reiter and Dale, 2000; Gatt and Krahmer, 2018; Dale, 2020) . In these, humans author a minimal set of responses templates with placeholder slot values. These slots are later filled at runtime, with the dialog input. Although template-based NLG modules are appealing due to their deterministic nature, inherent correctness, and low latency, they have major drawbacks: First, separate templates need to be authored for different response variations; this behavior is unfavorable for scaling. Second, templates authored for a particular domain are commonly not reusable. Lastly, no matter the complexity of the language instilled into templates, they form a strictly discrete set of responses, and therefore are bound to be limited in their response naturalness.", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 132, |
|
"text": "(Reiter and Dale, 2000;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 156, |
|
"text": "Gatt and Krahmer, 2018;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 168, |
|
"text": "Dale, 2020)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "More recently, advances in neural-network-based (conditional) language generation prompted a new direction in NLG research (Novikova et al., 2017; Budzianowski et al., 2018; Chen et al., 2020; Bal-akrishnan et al., 2019; Peng et al., 2020) . The process is typically split into two steps: (1) serialization of input data into a flattened meaning representation (MR), and (2) using the neural generation model to generate a natural language response conditioned on the MR. The models are trained on data that includes MR, response pairs, and therefore they are able to not only generate desired responses for MRs in their training data, but they are also expected to form coherent responses for novel MRs, owing to the generalization ability of their machine learning (ML) backbone.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 146, |
|
"text": "(Novikova et al., 2017;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 173, |
|
"text": "Budzianowski et al., 2018;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 174, |
|
"end": 192, |
|
"text": "Chen et al., 2020;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 220, |
|
"text": "Bal-akrishnan et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 239, |
|
"text": "Peng et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, deploying neural NLG systems in an industry setting is quite challenging. First, it is not trivial to train a model that reliably presents its input data with the high fidelity required from a userserving dialog system. Second, the models require much high-quality human-annotated data, which is resource intensive. Consequently, data annotation is a major limiting factor for scaling model-based NLG across domains and languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we detail our approach to production-level neural NLG, with a focus on scalability and data efficiency. Adopting the tree-structured MR framework introduced in , which allows better control over generated responses, we train sequence-to-sequence RNN models (Sutskever et al., 2014; ) that can produce high-fidelity responses. We then employ a multitude of techniques for reducing the amount of required data, primarily powered by eliminating the \"hidden\" redundancy by grouping data points with similar semantics into buckets. We train models either on the reduced data, or after increasing the size of the dataset using a novel synthetic augmentation technique. We also employ large, pre-trained attention-based language models (Lewis et al., 2019) , fine-tuning them on the same datasets, and then using novel methods to distill their knowledge into smaller sequence-to-sequence models. Further, we train models on data from multiple domains, showing gains over models trained on individual domains when the domains are semantically close together. We conclude with a compiled list of best practices for production-level NLG model development based on our analyses, and we present it as a runbook.", |
|
"cite_spans": [ |
|
{ |
|
"start": 271, |
|
"end": 295, |
|
"text": "(Sutskever et al., 2014;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 743, |
|
"end": 763, |
|
"text": "(Lewis et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "NLG from structured data has been an active research area for decades, facilitated of late by datasets like the E2E Challenge (Novikova et al., 2017) , MultiWoz (Budzianowski et al., 2018) and Conversational Weather . Recently, Seq2Seq models (Wen et al., 2015; Du\u0161ek and Jurc\u0131cek, 2016; , have become popular for their superior naturalness and simplicity. These models have achieved high performance on benchmarks like E2E challenge (Novikova et al., 2017) and WebNLG challenge (Gardent et al., 2017) . However, they require a lot of data making them resource-intensive to stand up and manage at scale.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 149, |
|
"text": "(Novikova et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 188, |
|
"text": "(Budzianowski et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 261, |
|
"text": "Recently, Seq2Seq models (Wen et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 287, |
|
"text": "Du\u0161ek and Jurc\u0131cek, 2016;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 457, |
|
"text": "(Novikova et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 501, |
|
"text": "(Gardent et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our work introduces an approach for bootstrapping data-efficient NLG models by auto-annotating unlabelled examples using a large pretrained sequence de-noiser model known as BART (Lewis et al., 2019) fine-tuned on a small annotated dataset. Additionally, to increase data collection efficiency, we present several bucketing strategies, which enable a more uniform data collection process over the possible semantic space. We improve upon the BART auto-annotation technique by combining it with an innovative method of dynamic data-augmentation (DDA) and fine-tuning BART auto-annotation on a small subset of data sampled using a medium grained bucketing approach. We also carried out experiments to examine the effects of bucketing granularity combined with domain complexity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 199, |
|
"text": "(Lewis et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In similar studies, pretrained GPT models (Radford et al., 2019) were used by Chen et al. (2020) and Peng et al. (2020) , who fine-tune them on a small set of in-domain data, but they did not distill these models into ones suitable for production.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 64, |
|
"text": "(Radford et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 96, |
|
"text": "Chen et al. (2020)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 101, |
|
"end": 119, |
|
"text": "Peng et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Interestingly, Wen et al. (2016) demonstrated that the structure of arguments in existing dialogues can be used to guide data collection for low-resource domain adaptation, which is similar to the bucketing strategies explored here. Additionally, Shah et al. (2018) introduce a dialogue self-play method where templates are instantiated with database values to create synthetic utterances, similar to our dynamic data-augmentation method; however, their instantiated templates are then rewritten by crowd-workers, whereas in our DDA method, crowd-sourced utterances are delexicalized and then re-instantiated with random values. Kedzie & McKeown (2019) also make use of a similar technique in their work on 3 Experimental Approach", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 32, |
|
"text": "Wen et al. (2016)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 265, |
|
"text": "Shah et al. (2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 629, |
|
"end": 652, |
|
"text": "Kedzie & McKeown (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The experiments were conducted using 4 task-oriented datasets: a Conversational Weather dataset introduced in and three additional datasets for the Reminder, Time, and Alarm domains. These four datasets were selected due to their varying level of complexity, which will be explained further in the results section. 1 In addition, these domains provide a good representation of various arguments such as tense, date time, and date time range as well as range queries that are typically seen across conversational systems. Descriptive statistics of the datasets are shown in Table 1 . All of the datasets use a tree structure to store the meaning representation (MR) that has been discussed in . If necessary, they use discourse relations (CONTRAST and JUSTIFY), which encompass a possible list of dialog acts (REQUEST, INFORM, etc.). The dialog acts contain a list of slot key-value pairs to be mentioned. The tree structures are used to present semantic information to the models after flattening. Examples of flattened MRs are shown in Table 2 and Table 3 . The synthetic user queries and scenarios were generated by engineers, the annotated responses were created by human annotators following guidelines written by computational linguists. The responses were verified to be grammatical and correct by the linguists to ensure data quality. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 573, |
|
"end": 580, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1037, |
|
"end": 1056, |
|
"text": "Table 2 and Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "All our datasets present tree-structured input. We found the tree structure helpful in grouping the training examples in order to reduce biases in the model-generated responses because of imbalanced distribution and also to improve data efficiency. We investigated several bucketing strategies that assign scenarios into groups based on their tree structures and argument values at different levels of granularity. During data collection, we observed that compared to random, bucket-assisted gradual data collection improved model performance due to more exhaustive MR coverage. Coarse-grained (CB) This bucketing strategy was the coarsest level of granularity. Under this strategy, the scenarios (MRs) are grouped using high-level argument names, which are at most two levels below the root node. For example, consider a second level argument such as date time that may have multiple nested arguments in different combinations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bucketing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In Coarse-grained bucketing, all variations deeper than date time were ignored. An example is shown in Table 3 , where in both the INFORM 1 and INFORM 2 dialog acts, despite different subarguments for the parent argument date time, variations are ignored. This strategy creates the smallest number of buckets. In spite of the high possible data efficiency using this method, models might exhibit worse performance due to limited MR coverage in the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 110, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bucketing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Medium-grained (MB) At this level of granularity, all sub-arguments were considered for creation of the bucket hashes. However, for certain pre-determined arguments/sub-arguments with small and finite variation, the argument name was replaced with its value. An example is the argument tense, which has 3 possible values; hence, when creating bucket hashes, we replace the tense argument with tense past, tense present, or tense future. This led to an increase in the bucketing space by the number of possible values for each such argument.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bucketing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In contrast to coarse-grained bucketing, the INFORM 2 and INFORM 3 dialog acts are grouped under different buckets as part of this strategy, since the date time parent argument has different subarguments. Moreover, for the INFORM 3 dialog act, the value of sub-argument colloquial is retained. This implies that if there was another dialog act with the same shape as INFORM 3 dialog act but a different value for the colloquial sub-argument, it would have been grouped into a different bucket than INFORM 3. This strategy increased the number of buckets compared to the CB case, improving coverage of different response variations. An example of medium-grained bucket hash appears in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 684, |
|
"end": 691, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bucketing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In this strategy, the goal was to group cases into the largest possible number of buckets in which the surface form of the sentence was independent of the argument values decided by linguists (FB). There were three major differences compared with the medium-grained approach: all argument values are considered, with partial delexicalization; argument values under the same argument name can be grouped; and uniqueness of argument values was tracked. For example, as shown in Table 3 , all argument values are considered, where the todo values are delexicalized, while colloquial is not. In addition, if the value of amount or amount remaining is 1, then the surface form of the response might change, since a plural form should be used for numbers more than 1. Therefore, there are two groups of these argument values, one for values greater than 1 and one for the value of 1. Finally, the todo argument values in INFORM 1 and INFORM 2 are the same. Therefore, they are both delexicalized to todo a, which is to differentiate between cases where the todos are different, since the model was allowed to aggregate based on arguments values to limit verbosity and increase naturalness. (If the values of todos were different, they would have been delexicalized to todo a and todo b, resulting in a different bucket hash.)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 476, |
|
"end": 483, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fine-grained (FB & FBQ)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our production models receive as input a combination of query and MR, in order to enable the possibility of conditioning the surface form of the response based on the query. Therefore, an additional level of bucketing can be achieved by delexicalizing the query (FBQ). For example, in Table 3 , the user has asked about a specific reminder, and the response confirms that by saying \"Yes\" at the beginning. (Saying \"Yes\" might have been unnecessary under a different query.) Since the queries in the datasets were generated synthetically, we could reliably delexicalize the query and consider the delexicalized query during bucket hash creation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 292, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fine-grained (FB & FBQ)", |
|
"sec_num": null |
|
}, |
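
{

"text": "To make the three granularities concrete, the following is a minimal sketch of bucket-hash creation over a nested-dictionary MR. The MR encoding, the argument names, and the set of value-folded arguments (VALUE_ARGS) are illustrative assumptions rather than the production schema; for FBQ, the hash of the delexicalized query would simply be appended to the fine-grained hash.

```python
# Illustrative sketch of bucket hashes at three granularities (not production code).
# An MR is assumed to be a nested dict, e.g.
#   {'INFORM': {'date_time': {'weekday': 'Monday', 'colloquial': 'tomorrow'}}}

VALUE_ARGS = {'tense', 'colloquial'}   # assumed arguments with small, finite value sets


def coarse_hash(mr, depth=0):
    # Coarse-grained (CB): argument names only, at most two levels below the root.
    if not isinstance(mr, dict) or depth >= 2:
        return ''
    return '(' + ' '.join(k + coarse_hash(v, depth + 1) for k, v in sorted(mr.items())) + ')'


def medium_hash(mr):
    # Medium-grained (MB): all argument names; fold in the values of selected arguments.
    if not isinstance(mr, dict):
        return ''
    parts = []
    for k, v in sorted(mr.items()):
        if k in VALUE_ARGS and not isinstance(v, dict):
            parts.append(k + '_' + str(v))
        else:
            parts.append(k + medium_hash(v))
    return '(' + ' '.join(parts) + ')'


def fine_hash(mr, seen=None):
    # Fine-grained (FB): partially delexicalize values, tracking repeats (todo_a, todo_b, ...).
    if seen is None:
        seen = {}
    if not isinstance(mr, dict):
        return ''
    parts = []
    for k, v in sorted(mr.items()):
        if isinstance(v, dict):
            parts.append(k + fine_hash(v, seen))
        elif k in VALUE_ARGS:
            parts.append(k + '_' + str(v))        # value kept verbatim
        else:
            if (k, v) not in seen:
                n = sum(1 for name, _ in seen if name == k)
                seen[(k, v)] = k + '_' + chr(ord('a') + n)
            parts.append(seen[(k, v)])            # delexicalized placeholder
    return '(' + ' '.join(parts) + ')'
```

Scenarios with identical hashes form one bucket; collecting a fixed number of examples per bucket then yields the 1PerCB, 1PerMB, and 1PerFB samples referred to later.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fine-grained (FB & FBQ)",

"sec_num": null

},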
|
{ |
|
"text": "We used various metrics to compare the performance of our proposed sampling and modeling approaches across experiments. Mainly, we focused on Tree Accuracy, which is a binary metric indicating whether the tree structure in the response is correct . This metric checks whether the structural tokens in the response are the same as those in the input MR, modulo reordering at the sibling level (see Balakrishnan et al.'s paper for complete details). Tree accuracy is also used in production to guard against hallucination: if tree accuracy fails, we fall back on templates to ensure correct response generation, even if it is less natural. In addition, we report BLEU Score (Papineni et al., 2002) for all of the experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 672, |
|
"end": 695, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
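
{

"text": "As an illustration only, the check can be thought of as comparing the structural (bracketed) tokens of the generated response against those of the input MR while allowing siblings to be reordered. The sketch below assumes a flattened MR such as '[INFORM [date_time [weekday Monday ] ] ]' and is a simplification of the production check described by Balakrishnan et al. (2019).

```python
# Simplified tree-accuracy sketch (illustrative; not the production implementation).
# Structural tokens start with '[' or are ']'; plain value tokens are ignored.

def parse(tokens):
    # Build (label, children) nodes from a flattened MR or response.
    root = ('ROOT', [])
    stack = [root]
    for tok in tokens:
        if tok.startswith('['):
            node = (tok[1:], [])
            stack[-1][1].append(node)
            stack.append(node)
        elif tok == ']':
            stack.pop()
    if len(stack) != 1:
        raise ValueError('unbalanced brackets')
    return root


def canonical(node):
    # Sort children recursively so that sibling reordering does not matter.
    label, children = node
    return (label, tuple(sorted(canonical(c) for c in children)))


def tree_accuracy(mr_string, response_string):
    try:
        return canonical(parse(mr_string.split())) == canonical(parse(response_string.split()))
    except (ValueError, IndexError):   # malformed structure in the response
        return False
```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Metrics",

"sec_num": "3.3"

},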
|
{ |
|
"text": "Tree Accuracy is a binary metric and can change from 1 to 0 even if one structural token is missing, as intended. We noticed that tree accuracy can fluctuate considerably due to the random initialization of the layer weights and the randomization in mini-batch creation, even if trained on the same dataset with the same training parameters. (This might be due to the fact that the models are trained to optimize for token-level likelihood, not correctness.) To track the effectiveness of the proposed approaches in reducing these fluctuations and increasing Robustness, we report the standard deviation of tree accuracy values based on 5 training instances for each experiment. The reported tree accuracy values for each experiment is the maximum one achieved in the same 5 runs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Human evaluations were used as a qualitative method to raise red-flags in this study. For human evaluation, the authors rated the responses on Correctness and Grammaticality, defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 Correctness: Evaluates semantic correctness of a response. Authors check for hallucinations, missing attributes, attribute aggregation and sentence structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 Grammaticality: Checks for grammatical correctness of a sentence, which includes subject-verb agreement, word order, completeness, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We report an Acceptability metric, which is the proportion of correct and grammatical responses sent for human evaluation. Due to annotator constraints, we devised a method to select the top 150 most differentiating examples from each domain's test set, in order to provide an understanding of the performance of each approach on the most challenging MRs. First, we categorized all of the test samples using the fine-grained (FB) bucketing technique. Then, for each bucket, the sample with the least number of correct (tree accuracy) responses across all of the experiments was selected if at least one approach responded correctly. Finally, the top 150 buckets with the least correct response were selected.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
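
{

"text": "The selection procedure can be sketched as follows; the data structures (a per-experiment map from test example to a 0/1 tree-accuracy outcome, and a map from example to its FB bucket hash) and all names are illustrative assumptions.

```python
# Sketch of selecting the most differentiating test examples (names are illustrative).
from collections import defaultdict

def select_hardest(results, fb_bucket, k=150):
    # results[experiment][example_id] -> 1 if the response was tree-accurate, else 0
    # fb_bucket[example_id] -> fine-grained (FB) bucket hash of the example
    correct = {ex: sum(r[ex] for r in results.values()) for ex in fb_bucket}
    buckets = defaultdict(list)
    for ex, bucket in fb_bucket.items():
        buckets[bucket].append(ex)
    picks = []
    for members in buckets.values():
        # hardest example in the bucket that at least one approach got right
        solvable = [ex for ex in members if correct[ex] > 0]
        if solvable:
            picks.append(min(solvable, key=lambda ex: correct[ex]))
    # keep the k buckets whose picked example had the fewest correct responses
    picks.sort(key=lambda ex: correct[ex])
    return picks[:k]
```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Metrics",

"sec_num": "3.3"

},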
|
{ |
|
"text": "In our experience, if a model output fails the tree accuracy check it has always been wrong, but passing tree accuracy does not guarantee acceptability. Nonetheless, it should be noted that the reported acceptability numbers are significantly worse than with our production models, as they are focused on the most challenging MRs. The production weather models have had very high acceptability, so we do not report acceptability for the Weather domain due to some bandwidth constraints.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To compare data-efficiency, we defined Data Reduction Rate as the percentage of the initial training examples that can be saved (not used for training) using any of the presented approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.3" |
|
}, |
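
{

"text": "In our own notation (the symbols below are ours, not from the paper), with N_total initial training examples and N_used examples actually used for training, this corresponds to

```latex
\mathrm{DRR} = \left(1 - \frac{N_{\mathrm{used}}}{N_{\mathrm{total}}}\right) \times 100\%
```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Metrics",

"sec_num": "3.3"

},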
|
{ |
|
"text": "The model architectures used in this study are either a sequence-to-sequence one with stacked LSTMs or derivatives of BART (Lewis et al., 2019) with stacked transformers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 143, |
|
"text": "(Lewis et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "In the LSTM-based models, we use trainable 50d GloVe (Pennington et al., 2014) embeddings. Model weight are updated using an ADAM optimizer (Kingma and Ba, 2015). For each experiment, we start with a learning rate of 0.01 and reduce it by a factor of 0.1 if validation loss does not decrease for 2 epochs. Our loss function is label smoothed CrossEntropy, where the beta parameter is between [0.01, 1]. Each model was trained for 100 epochs with a batch size of 32 and terminated when the validation loss stopped decreasing for 5 epochs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 78, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.4" |
|
}, |
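
{

"text": "A minimal PyTorch-style sketch wiring together the hyperparameters above is shown below. The seq2seq model and the data loaders are assumed to be provided (batch-first tensors, teacher forcing), and the 0.1 label-smoothing value is just one point in the reported [0.01, 1] range; this is a sketch, not our production training code.

```python
# Minimal training-loop sketch for the S2S model (assumptions noted above).
import math
import torch

def train_s2s(model, train_loader, val_loader, max_epochs=100, patience=5):
    criterion = torch.nn.CrossEntropyLoss(label_smoothing=0.1)   # label-smoothed CE
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=2)           # lr decay on plateau
    best_loss, stale = math.inf, 0
    for _ in range(max_epochs):
        model.train()
        for src, tgt in train_loader:                            # batch size 32
            optimizer.zero_grad()
            logits = model(src, tgt[:, :-1])                     # teacher forcing
            loss = criterion(logits.reshape(-1, logits.size(-1)), tgt[:, 1:].reshape(-1))
            loss.backward()
            optimizer.step()
        model.eval()
        val_loss, n_batches = 0.0, 0
        with torch.no_grad():
            for src, tgt in val_loader:
                out = model(src, tgt[:, :-1])
                val_loss += criterion(out.reshape(-1, out.size(-1)), tgt[:, 1:].reshape(-1)).item()
                n_batches += 1
        val_loss /= max(n_batches, 1)
        scheduler.step(val_loss)
        if val_loss < best_loss:
            best_loss, stale = val_loss, 0
        else:
            stale += 1
        if stale >= patience:                                    # early stopping
            break
    return model
```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "3.4"

},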
|
{ |
|
"text": "For BART, we use the 6 layer BART-Base model in fp16 mode. This helps avoid the memory issues, which were faced with using the 12 layer BART model. For each experiment, we use ADAM as our optimizer with 300 warm-up steps. The starting learning rate of 3e-5 is reduced by a factor of 0.5 if validation loss does not decrease for 5 epochs. Each model is trained for 100 epochs with a batch size of 4 and terminated when the validation loss stopped decreasing for 7 epochs. With all models we use a beam size of 1 to decrease latency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "LSTM-based Sequence-to-Sequence Model (S2S) Our main LSTM-based model has a single-layer encoder and a single-layer decoder. The dimensions of both encoder and decoder hidden states are set to 128 with 0.2 dropout. The input to the model is a concatenation of the user query and the meaning representation produced by the dialog management system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We experimented with a simple joint-training strategy for domains with similar responses, MRs, and semantics using the S2S architecture. The datasets were combined and a joint model was trained on them. Here, Alarm and Reminder are the two domains that are similar to each other and thus these were the domains we experimented with for joint-training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint-training (JT)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To increase data efficiency, we carried out experiments using a limited number (1,3,5) of examples per bucket (coarse, medium, and fine) to determine at what training size the performance gains would plateau with more data collection. At very high data efficiency levels, we noticed that the model performance fluctuated significantly based on the argument values, which was unacceptable for a production system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Data Augmentation (DDA)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "An initial idea was to pre-process the input and feed the delexicalized query and MR to the model. Although we could reliably delexicalize the user query during model training, it would have been very unstable to implement such a technique in production. In addition, there were concerns about added latency and higher complexity of the system. Therefore, we trained the model with the raw user query and with an MR in which argument values are lexicalized, which originally resulted in low data-efficiency in our production domains. We devised Dynamic Data Augmentation (DDA) as a new technique to provide robust model response with respect to changes in argument values using only a fraction of human-annotated responses. The idea is to randomly replace pre-processed tokens in the leaf nodes such as todo a, time a, etc.-as shown in the fine-grained example in Table 3 -with a value from a list of possible values, which are not expected to change the surface form of the sentence during mini-batch creation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 864, |
|
"end": 871, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dynamic Data Augmentation (DDA)", |
|
"sec_num": null |
|
}, |
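
{

"text": "A minimal sketch of this re-instantiation step is given below; the slot names, the value pools, and the placeholder token format (e.g., todo_a) are illustrative assumptions, and the same sampled value is reused wherever the same placeholder appears in the MR and the response so that the pair stays consistent.

```python
# Sketch of Dynamic Data Augmentation (DDA) at mini-batch creation time (illustrative).
import random

VALUE_POOL = {                       # per-slot values assumed not to change the surface form
    'todo': ['water the plants', 'call the dentist', 'buy groceries'],
    'time': ['3 pm', '7:30 am', 'noon'],
}

def dda_augment(mr_tokens, response_tokens, rng=random):
    assignment = {}                  # placeholder -> sampled value (shared across MR and response)
    def fill(tok):
        slot = tok.rsplit('_', 1)[0]           # e.g. 'todo_a' -> 'todo'
        if slot in VALUE_POOL:
            if tok not in assignment:
                assignment[tok] = rng.choice(VALUE_POOL[slot])
            return assignment[tok]
        return tok
    return [fill(t) for t in mr_tokens], [fill(t) for t in response_tokens]

# Each epoch, the same delexicalized training pair is re-instantiated with fresh values:
mr = '[INFORM [todo todo_a ] [time time_a ] ]'.split()
resp = 'Yes , I will remind you to todo_a at time_a .'.split()
print(dda_augment(mr, resp))
```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dynamic Data Augmentation (DDA)",

"sec_num": null

},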
|
{ |
|
"text": "We used DDA to train on small datasets formed by sampling one or fewer examples per fine-grained bucket. In addition to higher data efficiency, such randomization should theoretically reduce the possibility of over-fitting. Similarly, DDA enables the 1PerBucket sampling technique in low-resource domains, resulting in a more uniform distribution of MR types during training. If the delexicalized query and MR shown in Table 3 are included in the training data, Table 4 demonstrates how DDA would augment the example differently at each epoch.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 419, |
|
"end": 426, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 469, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dynamic Data Augmentation (DDA)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BART Data Augmentation (BART+DDA) In BART auto-annotation, a small subset of data is sampled by selecting one example from each medium-grained bucket, followed by fine-tuning the BART model directly on this dataset. The fine-tuned BART model is then run on unlabelled scenario data, as part of the sequence-level knowledge distillation step described in the next section (S2S+KD), and the examples which match in tree structure with the input scenario are selected for training data augmentation. Sampling the small data using medium-grained bucketing introduces two issues. Firstly, although most response variations can be captured, the variations where words around argument slots change depending upon the argument value might be missed. Secondly, model performance is not robust to varying argument values.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Data Augmentation (DDA)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "DDA solves both the above issues. In the BART+DDA approach, instead of directly fine-tuning the BART model on a small data, we fine-tune it on the dynamically augmented data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Data Augmentation (DDA)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "S2S+KD BART+DDA suffers from high latency and model size. In an effort to create productionquality models, we run the BART+DDA model on unlabelled scenario inputs, and select examples which match in tree structure. To auto-annotate unlabelled scenarios, we run a beam search of following beam sizes [1, 5, 10] , and select the first response which passes the tree accuracy check. With larger beam sizes, even if the lower responses pass the tree accuracy, they often tend to be incorrect.", |
|
"cite_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 302, |
|
"text": "[1,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 305, |
|
"text": "5,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 309, |
|
"text": "10]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Data Augmentation (DDA)", |
|
"sec_num": null |
|
}, |
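
{

"text": "The auto-annotation step can be sketched as follows; generate() and tree_accuracy() are assumed helper functions (the latter as sketched in the Metrics section), candidates are assumed to be returned in rank order, and this is an illustration rather than our production pipeline.

```python
# Sketch of auto-annotating unlabelled scenarios for knowledge distillation (illustrative).

def auto_annotate(model, unlabelled_mrs, generate, tree_accuracy, beam_sizes=(1, 5, 10)):
    synthetic = []
    for mr in unlabelled_mrs:
        for beam in beam_sizes:                        # escalate the beam size if needed
            candidates = generate(model, mr, beam_size=beam)
            picked = next((c for c in candidates if tree_accuracy(mr, c)), None)
            if picked is not None:
                synthetic.append((mr, picked))         # first response passing the check
                break
    return synthetic

# The (mr, response) pairs are then combined with the small gold dataset to train the
# student S2S model (S2S+KD), optionally followed by DDA fine-tuning on the gold data.
```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dynamic Data Augmentation (DDA)",

"sec_num": null

},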
|
{ |
|
"text": "We then combine the synthetically generated examples labelled by BART with the golden humanlabelled small data, and train a S2S model on it. Using BART as a teacher model to train a smaller, faster S2S model is similar to Kim and Rush's (2016) sequence-level knowledge distillation (KD) approach. S2S+KD+DDA The S2S+KD model can make mistakes because the majority of its training data comes from synthetic data generated by the BART+DDA model. If the BART+DDA model makes a mistake on a particular scenario, these mistakes get amplified because it will be repeated when auto-annotating similar unlabelled scenarios. Even if golden human data has the correct response for these scenarios, it might not be enough to correct these mistakes. With S2S+KD+DDA, we solve this problem by finetuning the S2S+KD model using the DDA approach only on the gold human-labelled small data, as in recent self-training work for MT (He et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 243, |
|
"text": "Kim and Rush's (2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 931, |
|
"text": "(He et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Data Augmentation (DDA)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The results of selected experiments on Alarm, Time, Reminder and Weather domains are presented in Tables 5, 6 , 7, and 8, respectively. More comprehensive experimental results can be found in the Appendix. In addition, Figure 1 demonstrates comparative plots of all experiments with more than 70.0% tree accuracy and a 70.0% data reduction rate. From a pure data reduction point of view, the results suggest that S2S+KD+DDA and S2S+KD performed the best followed by BART+DDA. S2S+DDA generally improved performance compared to S2S trained on the same data. It can be also observed that joint domain training can improve performance compared to training only on the in-domain data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 109, |
|
"text": "Tables 5, 6", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 227, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The methods proposed and applied in the paper vary significantly in term of complexity and engineering effort needed to execute them. Therefore, we will analyze the results in each domain considering ease of use and scalability, with the major focus still on data reduction while maintaining performance. BART+DDA, S2S+KD and S2S+KD+DDA improved the performance in all of the domains. However, deploying them to production was not justified for all the domains due to higher development and maintenance resources required. Specifically, BART+DDA has very high latency. S2S+KD and S2S+KD+DDA provide similar latency as the S2S variants. However, they require multiple engineering steps including training 3 models sequentially: First, fine-tune a BART+DDA model on a small dataset, followed by auto-annotating large amounts of unlabelled data for augmentation. Finally, S2S+KD and S2S+KD+DDA are trained sequentially on the augmented dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "It is also not trivial to create non-annotated query and MR pairs for a new domain. Language expansion may also prove difficult as the cross-lingual mBART is only available for 25 languages, and cross-lingual extension of the auto-annotation techniques should be verified. KD variants are mostly beneficial where the required resources for annotating more data are high. Moreover, the gap between KD variants and S2S+DDA might diminish with the addition of 500-1000 examples. The balance between the required data annotation resources and the required engineering resources should be considered during approach selection based on the domain complexity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Alarm was the least complex domain studied here. There were only 286 fine-grained (1PerFBQ) buckets, which were reduced to 188 buckets after the variation in user query was ignored (1PerFB). As shown in Table 5 and Figure 1, S2S Bucketed (1PerCB, 1PerMB, 1PerFB, and 1PerFBQ ) experiments did not perform as well as other approaches. Interestingly, S2S+DDA did not perform as well as in other domains, which was probably due to the extremely low training data size in Alarm. However, combining Alarm with Reminder data improved the performance considerably (S2S+JT). S2S+JT tree accuracy reached within 0.2% of the S2S BASE case with a data reduction rate of 93.7%. The KD variants had the highest performance but they required higher development and maintenance resources.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 210, |
|
"text": "Table 5", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 274, |
|
"text": "Figure 1, S2S Bucketed (1PerCB, 1PerMB, 1PerFB, and 1PerFBQ", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Alarm Domain", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Time was the second simplest domain in this study. There were 860 fine-grained buckets, which were reduced to 330 buckets when the variation in user query was ignored. However, Time was unique in having tense as an argument which could result in errors that could pass tree accuracy. S2S Bucketed (1PerCB, 1PerMB, 1PerFB, and 1PerFBQ) showed significantly lower performance both in terms of tree accuracy and acceptability compared to the S2S BASE experiment. S2S+DDA with 1PerFBQ data achieved tree accuracy of 99.6%, which was just 0.4% lower than S2S BASE (Figure 1 ). In addition, S2S+DDA represented a data reduction of 85.0%. Similar to Alarm, the KD variants perform the best but they require higher engineering resources.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 559, |
|
"end": 568, |
|
"text": "(Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Time Domain", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Reminder was the second most complex domain with more than 1900 fine-grained buckets (1PerFBQ). As shown in Table 7 and Figure 1 , S2S Bucketed experiments with limited data (1PerCB, 1PerMB, 1PerFB, and 1PerFBQ) significantly under performed compared to the S2S BASE case. S2S+DDA with 1PerFB achieve tree accuracy of 96.3% with data reduction of 92.5%. However, a specific issue with change of ordering was detected during the human evaluations, which resulted in considerably low acceptability. A more comprehensive implementation of tree accuracy will be worked on to solved this issue. While joint-training increased both tree accuracy and acceptability over the S2S Bucketed experiments, other methods still outperformed joint-training.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 115, |
|
"text": "Table 7", |
|
"ref_id": "TABREF12" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 128, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reminder Domain", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "S2S+KD and S2S+KD+DDA performed higher than other methods. Specifically, S2S+KD+DDA with a data reduction of 92.5% achieved tree accuracy of 98.3%, which was within 1.0% of the S2S BASE experiment. Higher model maintenance resources will be required here as well, which might provide incentives for more data collection and ensuring higher data quality to improve the performance of S2S+DDA or S2S+JT to production levels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reminder Domain", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Weather was the most complex domain in this study. There were thousands of possible scenarios and the dataset size was considerably larger, accordingly. As demonstrated in Table 8 , even a simple method such as S2S with only 1PerMB data achieved performance within 1.0% of the S2S BASE case with a data reduction rate of 74.8%. This is due to the high variety in the data that results in creation of more than 6400 medium-grained buckets, which was considerably higher than other domains. Therefore, we sub-sampled the buckets aggressively to examine the extent of possible data reduction. Using only 1/4 of the fine-grained buckets to train the models in the S2S+KD and S2S+KD+DDA approaches resulted in tree accuracy values within 2.0% of the S2S BASE case. However, BART+DDA and S2S+DDA did not perform comparable to the KD variants. In addition, S2S+KD and S2S+KD+DDA provided low latency, which made the 92.5% data reduction very favorable. In complex domains such as Weather, deploying models trained with more complex approaches that require higher development and maintenance resources is justified by high data-efficiency gains (19,000 fewer training samples here). ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 179, |
|
"text": "Table 8", |
|
"ref_id": "TABREF14" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Weather Domain", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Several considerations are necessary for deploying model-based task-oriented dialogue systems to production. While increasing data efficiency was the primary goal of our study, we also considered and balanced data efficiency gains with several other factors such as acceptability, latency, and the required development and maintenance resources. Focusing on four datasets for domains with varying level of complexity, we propose a sequential domain development run-book, where development of different domains can halt at different steps based on model performance evaluation. The steps are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Bucketing MRs based on a structure (tree-based here) in the data to avoid unnecessary and imbalanced data collection. Collect 1-3 examples per bucket. Train a model and evaluate it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 If data for domains with similar tasks and semantics (like Reminder and Time) are available, Perform joint-training possibly followed by in-domain fine-tuning. Evaluate the model performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Implement Dynamic Data Augmentation (DDA) to reduce the dependency of responses on interchangeable argument values. Train with DDA and evaluate the model performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 First, use pre-trained models (e.g. BART) to generate responses for unlabelled data. Then, combine the augmentation data with human-annotated data and train a small model (KD). Finally, fine-tune the model using DDA with the small human-annotated data. Evaluate the model performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 If necessary, collect more examples per MR bucket and start from the beginning to deploy the model with the lowest required development and maintenance resources. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The datasets can be found at https://github.com/facebookresearch/DataEfficientNLG", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank our reviewers for their helpful feedback. Many thanks to our linguistic engineering team (Anoop Sinha, Shiun-Zu Kuo, Catharine Youngs, Kirk LaBuda, Steliana Ivanova, Ceci Pompeo, and Briana Nettie) for their hard work and for being great partners in this effort. We would also like to thank Jinfeng Rao, Kartikeya Upasani, Ben Gauthier, and Fiona Yee for their contributions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.0473" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Constrained decoding for neural NLG from compositional representations in task-oriented dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Anusha", |
|
"middle": [], |
|
"last": "Balakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinfeng", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikeya", |
|
"middle": [], |
|
"last": "Upasani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajen", |
|
"middle": [], |
|
"last": "Subba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anusha Balakrishnan, Jinfeng Rao, Kartikeya Upasani, Michael White, and Rajen Subba. 2019. Constrained decoding for neural NLG from compositional representations in task-oriented dialogue. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, July. To appear.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Multiwoz-a large-scale multi-domain wizard-of-oz dataset for task-oriented dialogue modelling", |
|
"authors": [ |
|
{ |
|
"first": "Pawe\u0142", |
|
"middle": [], |
|
"last": "Budzianowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Hsien", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo-Hsiang", |
|
"middle": [], |
|
"last": "Tseng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I\u00f1igo", |
|
"middle": [], |
|
"last": "Casanueva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Ultes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Osman Ramadan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gasic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5016--5026", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pawe\u0142 Budzianowski, Tsung-Hsien Wen, Bo-Hsiang Tseng, I\u00f1igo Casanueva, Stefan Ultes, Osman Ramadan, and Milica Gasic. 2018. Multiwoz-a large-scale multi-domain wizard-of-oz dataset for task-oriented dialogue modelling. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 5016-5026.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Few-shot nlg with pretrained language model", |
|
"authors": [ |
|
{ |
|
"first": "Zhiyu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harini", |
|
"middle": [], |
|
"last": "Eavani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinyin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiyu Chen, Harini Eavani, Wenhu Chen, Yinyin Liu, and William Yang Wang. 2020. Few-shot nlg with pre- trained language model. Proceedings of the 58th Annual Meeting of the Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning phrase representations using rnn encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fethi", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1406.1078" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart Van Merri\u00ebnboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Natural language generation: The commercial state of the art in 2020", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Natural Language Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Dale. 2020. Natural language generation: The commercial state of the art in 2020. Natural Language Engineering. To appear.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Sequence-to-sequence generation for spoken dialogue via deep syntax trees and strings", |
|
"authors": [ |
|
{ |
|
"first": "Ondrej", |
|
"middle": [], |
|
"last": "Du\u0161ek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Jurc\u0131cek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "The 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ondrej Du\u0161ek and Filip Jurc\u0131cek. 2016. Sequence-to-sequence generation for spoken dialogue via deep syntax trees and strings. In The 54th Annual Meeting of the Association for Computational Linguistics, page 45.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The WebNLG challenge: Generating text from RDF data", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anastasia", |
|
"middle": [], |
|
"last": "Shimorina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Perez-Beltrachini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 10th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "124--133", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017. The WebNLG chal- lenge: Generating text from RDF data. In Proceedings of the 10th International Conference on Natural Lan- guage Generation, pages 124-133, Santiago de Compostela, Spain, September. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Survey of the state of the art in natural language generation: Core tasks, applications and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Gatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "61", |
|
"issue": "", |
|
"pages": "65--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Gatt and Emiel Krahmer. 2018. Survey of the state of the art in natural language generation: Core tasks, applications and evaluation. Journal of Artificial Intelligence Research, 61:65-170.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Revisiting self-training for neural sequence generation", |
|
"authors": [ |
|
{ |
|
"first": "Junxian", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junxian He, Jiatao Gu, Jiajun Shen, and Marc'Aurelio Ranzato. 2020. Revisiting self-training for neural sequence generation. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A good sample is hard to find: Noise injection sampling and selftraining for neural language generation models", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Kedzie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "584--593", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Kedzie and Kathleen McKeown. 2019. A good sample is hard to find: Noise injection sampling and self- training for neural language generation models. In Proceedings of the 12th International Conference on Natural Language Generation, pages 584-593, Tokyo, Japan, October-November. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Sequence-level knowledge distillation", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1317--1327", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim and Alexander Rush. 2016. Sequence-level knowledge distillation. In Proceedings of the 2016 Confer- ence on Empirical Methods in Natural Language Processing, pages 1317-1327.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Adam: A method for stochastic optimization. the 3rd International Conference for Learning Representations", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. the 3rd International Conference for Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ves", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.13461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoy- anov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Multilingual denoising pre-training for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXivpreprintarXiv:2001.08210" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. In arXiv preprint arXiv:2001.08210.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The e2e dataset: New challenges for end-to-end generation", |
|
"authors": [ |
|
{ |
|
"first": "Jekaterina", |
|
"middle": [], |
|
"last": "Novikova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Du\u0161ek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Verena", |
|
"middle": [], |
|
"last": "Rieser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1706.09254" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, and Verena Rieser. 2017. The e2e dataset: New challenges for end-to-end generation. arXiv preprint arXiv:1706.09254.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. ACL-02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proc. ACL-02.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Few-shot natural language generation for task-oriented dialog", |
|
"authors": [ |
|
{ |
|
"first": "Baolin", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenguang", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunyuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiujun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinchao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.12328" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baolin Peng, Chenguang Zhu, Chunyuan Li, Xiujun Li, Jinchao Li, Michael Zeng, and Jianfeng Gao. 2020. Few-shot natural language generation for task-oriented dialog. arXiv preprint arXiv:2002.12328.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word represen- tation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "OpenAI Blog", |
|
"volume": "1", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A tree-to-sequence model for neural nlg in task-oriented dialog", |
|
"authors": [ |
|
{ |
|
"first": "Jinfeng", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikeya", |
|
"middle": [], |
|
"last": "Upasani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anusha", |
|
"middle": [], |
|
"last": "Balakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anuj", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajen", |
|
"middle": [], |
|
"last": "Subba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "95--100", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinfeng Rao, Kartikeya Upasani, Anusha Balakrishnan, Michael White, Anuj Kumar, and Rajen Subba. 2019. A tree-to-sequence model for neural nlg in task-oriented dialog. In Proceedings of the 12th International Confer- ence on Natural Language Generation, pages 95-100.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Building Natural-Language Generation Systems", |
|
"authors": [ |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehud Reiter and Robert Dale. 2000. Building Natural-Language Generation Systems. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Bootstrapping a neural conversational agent with dialogue self-play, crowdsourcing and on-line reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Pararth", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-T\u00fcr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "T\u00fcr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "41--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pararth Shah, Dilek Hakkani-T\u00fcr, Bing Liu, and Gokhan T\u00fcr. 2018. Bootstrapping a neural conversational agent with dialogue self-play, crowdsourcing and on-line reinforcement learning. In Proceedings of the 2018 Con- ference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Industry Papers), pages 41-51, New Orleans -Louisiana, June. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Advances in neural information processing systems, pages 3104-3112.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Semantically conditioned LSTM-based natural language generation for spoken dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Tsung-Hsien Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Gasic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pei-Hao", |
|
"middle": [], |
|
"last": "Mrk\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Vandyke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1711--1721", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Hsien Wen, Milica Gasic, Nikola Mrk\u0161i\u0107, Pei-Hao Su, David Vandyke, and Steve Young. 2015. Semanti- cally conditioned LSTM-based natural language generation for spoken dialogue systems. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1711-1721. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Multi-domain neural network language generation for spoken dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Tsung-Hsien Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Gasic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lina", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mrksic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pei-Hao", |
|
"middle": [], |
|
"last": "Rojas-Barahona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Vandyke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Hsien Wen, Milica Gasic, Nikola Mrksic, Lina M. Rojas-Barahona, Pei-Hao Su, David Vandyke, and Steve Young. 2016. Multi-domain neural network language generation for spoken dialogue systems.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Change of tree accuracy vs. training data size and data reduction for the proposed approaches." |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Number of examples in training, validation, and test sets for all domains in addition to number of different buckets in the training set. CB, MB, FBQ, and FB stand for coarse-grained, medium-grained, fine-grained combined with query, and fine-grained buckets, respectively.", |
|
"content": "<table><tr><td>self-training for neural NLG; by comparison, we experiment with DDA in a wider variety of training</td></tr><tr><td>scenarios.</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "A training example with a discourse relation (bold black node). Blue nodes are the dialog acts, red nodes are the first level arguments under dialog acts and orange nodes are the second level arguments. Argument values at the leaf nodes and terminal tokens are in black.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "QueryDo I have any reminder to buy milk? Reference Yes, there are 3 reminders. The first two are, buy milk at 7 PM and tomorrow. There's 1 other reminder.", |
|
"content": "<table><tr><td>Delexicalized Query</td><td>Do I have any reminder to todo a ?</td></tr><tr><td/><td>INFORM 1[amount]</td></tr><tr><td>Coarse grained</td><td>INFORM 2[todo date time]</td></tr><tr><td>Bucket Hash</td><td>INFORM 3[todo date time]</td></tr><tr><td/><td>INFORM 4[amount remaining]</td></tr><tr><td/><td>INFORM 1[amount]</td></tr><tr><td colspan=\"2\">Medium grained INFORM 2[todo date time[time]]</td></tr><tr><td>Bucket Hash</td><td>INFORM 3[todo date time[colloquial[ tomorrow ]]]</td></tr><tr><td/><td>INFORM 4[amount remaining]</td></tr><tr><td/><td>INFORM 1[amount[ amount gr1 ]]</td></tr><tr><td>Fine grained</td><td>INFORM 2[todo[ todo a ] date time[time[ time a ]]]</td></tr><tr><td>Bucket Hash</td><td>INFORM 3[</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>" |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>" |
|
}, |
|
"TABREF9": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on selected Alarm domain experiments in percentage.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF11": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on selected Time domain experiments in percentage.", |
|
"content": "<table><tr><td>Data</td><td>Approach</td><td colspan=\"2\">BLEU SCORE ACCURACY TREE</td><td>Acceptability</td><td>DATA REDUCTION</td><td>TREEACC STDEV</td></tr><tr><td>ALL</td><td>S2S BASE</td><td>92.6</td><td>99.3</td><td>-</td><td>0.0</td><td>0.1</td></tr><tr><td>5PerMB+Alarm</td><td>S2S+JT</td><td>92.4</td><td>97.6</td><td>88.7</td><td>82.6</td><td>1.3</td></tr><tr><td>1PerFBQ+Alarm</td><td>S2S+JT</td><td>92.8</td><td>97.6</td><td>88.0</td><td>80.0</td><td>0.9</td></tr><tr><td>1PerFBQ</td><td>S2S</td><td>92.1</td><td>95.6</td><td>83.3</td><td>80.0</td><td>0.3</td></tr><tr><td>1PerFB</td><td>S2S</td><td>90.4</td><td>85.7</td><td>47.3</td><td>92.5</td><td>27.0</td></tr><tr><td>1PerFB</td><td>S2S+DDA</td><td>92.1</td><td>96.3</td><td>84.0</td><td>92.5</td><td>26.0</td></tr><tr><td>1PerFB</td><td>BART+DDA</td><td>91.5</td><td>97.1</td><td>86.7</td><td>92.5</td><td>1.6</td></tr><tr><td>1PerFB</td><td>S2S+KD</td><td>91.9</td><td>98.1</td><td>89.3</td><td>92.5</td><td>0.2</td></tr><tr><td>1PerFB</td><td>S2S+KD+DDA</td><td>91.9</td><td>98.3</td><td>94.0</td><td>92.5</td><td>0.2</td></tr></table>" |
|
}, |
|
"TABREF12": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on selected Reminder domain experiments in percentage.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF14": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on selected Weather domain experiments in percentage.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF16": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on all Alarm domain experiments. All metrics are percentages.", |
|
"content": "<table><tr><td>Data</td><td>Approach</td><td colspan=\"2\">BLEU SCORE ACCURACY TREE</td><td>Acceptability</td><td>DATA REDUCTION</td><td>TREEACC STDEV</td></tr><tr><td>ALL</td><td>S2S BASE</td><td>95.9</td><td>100</td><td>-</td><td>0.0</td><td>0.1</td></tr><tr><td>1PerCB</td><td>S2S</td><td>76.1</td><td>12.1</td><td>1.3</td><td>99.7</td><td>4.9</td></tr><tr><td>1PerMB</td><td>S2S</td><td>92.3</td><td>80.1</td><td>61.3</td><td>94.8</td><td>16.5</td></tr><tr><td>1PerFBQ</td><td>S2S</td><td>95.4</td><td>97.7</td><td>90.0</td><td>85.0</td><td>2.5</td></tr><tr><td>1PerFB</td><td>S2S</td><td>93.5</td><td>89.1</td><td>76.7</td><td>94.0</td><td>7.6</td></tr><tr><td>1PerFBQ</td><td>S2S+DDA</td><td>95.7</td><td>99.6</td><td>96.7</td><td>85.0</td><td>4.8</td></tr><tr><td>1PerFB</td><td>S2S+DDA</td><td>94.9</td><td>97.8</td><td>87.3</td><td>94.0</td><td>10.2</td></tr><tr><td>1PerFBQ</td><td>BART+DDA</td><td>95.5</td><td>99.6</td><td>98.0</td><td>85.0</td><td>0.2</td></tr><tr><td>1PerFB</td><td>BART+DDA</td><td>93.8</td><td>96.1</td><td>90.7</td><td>94.0</td><td>1.7</td></tr><tr><td>1PerFBQ</td><td>S2S+KD</td><td>95.8</td><td>99.9</td><td>98.0</td><td>85.0</td><td>0.1</td></tr><tr><td>1PerFB</td><td>S2S+KD</td><td>94.6</td><td>99.8</td><td>96.0</td><td>94.0</td><td>0.1</td></tr><tr><td colspan=\"2\">1PerFBQ S2S+KD+DDA</td><td>95.8</td><td>100.0</td><td>99.3</td><td>85.0</td><td>0.1</td></tr><tr><td>1PerFB</td><td>S2S+KD+DDA</td><td>94.6</td><td>100.0</td><td>98.6</td><td>94.0</td><td>0.1</td></tr></table>" |
|
}, |
|
"TABREF17": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on all Time domain experiments. All metrics are percentages.", |
|
"content": "<table><tr><td>Data</td><td>Approach</td><td colspan=\"2\">BLEU SCORE ACCURACY TREE</td><td>Acceptability</td><td>DATA REDUCTION</td><td>TREEACC STDEV</td></tr><tr><td>ALL</td><td>S2S BASE</td><td>92.6</td><td>99.3</td><td>-</td><td>0.0</td><td>0.1</td></tr><tr><td>1PerCB</td><td>S2S</td><td>15.9</td><td>0.1</td><td>0.6</td><td>99.3</td><td>0.2</td></tr><tr><td>1PerMB</td><td>S2S</td><td>89.94</td><td>75.7</td><td>28.0</td><td>94.2</td><td>22.4</td></tr><tr><td>5PerMB+Alarm</td><td>S2S+JT</td><td>92.4</td><td>97.6</td><td>88.7</td><td>82.6</td><td>1.3</td></tr><tr><td>3PerFB+Alarm</td><td>S2S+JT</td><td>92.6</td><td>98.1</td><td>86.0</td><td>82.0</td><td>2.2</td></tr><tr><td>1PerFBQ+Alarm</td><td>S2S+JT</td><td>92.8</td><td>97.6</td><td>88.0</td><td>80.0</td><td>0.9</td></tr><tr><td>1PerFBQ</td><td>S2S</td><td>92.1</td><td>95.6</td><td>83.3</td><td>80.0</td><td>0.3</td></tr><tr><td>1PerFB</td><td>S2S</td><td>90.4</td><td>85.7</td><td>47.3</td><td>92.5</td><td>27.0</td></tr><tr><td>1PerFBQ</td><td>S2S+DDA</td><td>92.3</td><td>96.9</td><td>82.0</td><td>80.0</td><td>0.2</td></tr><tr><td>1PerFB</td><td>S2S+DDA</td><td>92.1</td><td>96.3</td><td>84.0</td><td>92.5</td><td>26.0</td></tr><tr><td>1PerFBQ</td><td>BART+DDA</td><td>92.1</td><td>98.3</td><td>93.3</td><td>80.0</td><td>0.2</td></tr><tr><td>1PerFB</td><td>BART+DDA</td><td>91.5</td><td>97.1</td><td>86.7</td><td>92.5</td><td>1.6</td></tr><tr><td>1PerFBQ</td><td>S2S+KD</td><td>92.6</td><td>98.7</td><td>92.0</td><td>80.0</td><td>0.2</td></tr><tr><td>1PerFB</td><td>S2S+KD</td><td>91.9</td><td>98.1</td><td>83.3</td><td>92.5</td><td>0.2</td></tr><tr><td>1PerFBQ</td><td>S2S+KD+DDA</td><td>92.6</td><td>98.9</td><td>96.0</td><td>80.0</td><td>0.2</td></tr><tr><td>1PerFB</td><td>S2S+KD+DDA</td><td>91.9</td><td>98.3</td><td>94.0</td><td>92.5</td><td>0.2</td></tr></table>" |
|
}, |
|
"TABREF18": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on all Reminder domain experiments. All metrics are percentages.", |
|
"content": "<table><tr><td>Data</td><td>Approach</td><td colspan=\"3\">BLEU SCORE ACCURACY REDUCTION TREE DATA</td><td>TREEACC STDEV</td></tr><tr><td>ALL</td><td>S2S BASE</td><td>91.4</td><td>91.4</td><td>0.0</td><td>0.1</td></tr><tr><td>1PerCB</td><td>S2S</td><td>88.1</td><td>77.9</td><td>91.2</td><td>2.6</td></tr><tr><td>1PerMB</td><td>S2S</td><td>90.7</td><td>90.6</td><td>74.8</td><td>0.3</td></tr><tr><td>1PerFB</td><td>S2S</td><td>91.3</td><td>91.4</td><td>40.0</td><td>0.1</td></tr><tr><td>1PerFB</td><td>S2S+DDA</td><td>91.3</td><td>91.1</td><td>40.0</td><td>0.1</td></tr><tr><td>0.5PerFB</td><td>S2S+DDA</td><td>89.8</td><td>86.6</td><td>85.0</td><td>18.5</td></tr><tr><td>0.25PerFB</td><td>S2S+DDA</td><td>87.3</td><td>77.8</td><td>92.5</td><td>12.3</td></tr><tr><td>0.5PerFB</td><td>BART+DDA</td><td>90.2</td><td>89.9</td><td>85.0</td><td>1.7</td></tr><tr><td>0.25PerFB</td><td>BART+DDA</td><td>89.2</td><td>86.2</td><td>92.5</td><td>1.8</td></tr><tr><td>0.5PerFB</td><td>S2S+KD</td><td>90.8</td><td>90.9</td><td>85.0</td><td>0.1</td></tr><tr><td>0.25PerFB</td><td>S2S+KD</td><td>89.7</td><td>89.4</td><td>92.5</td><td>0.1</td></tr><tr><td>0.5PerFB</td><td>S2S+KD+DDA</td><td>90.8</td><td>91.0</td><td>85.0</td><td>0.1</td></tr><tr><td colspan=\"2\">0.25PerFB S2S+KD+DDA</td><td>89.8</td><td>89.8</td><td>92.5</td><td>0.1</td></tr></table>" |
|
}, |
|
"TABREF19": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on all Weather domain experiments. All metrics are percentages.", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |