|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:55:03.223260Z" |
|
}, |
|
"title": "Task-Oriented Dialogue as Dataflow Synthesis", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bufe", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Burkett", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Clausman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Crawford", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Crim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Deloach", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Leah", |
|
"middle": [], |
|
"last": "Dorner", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kristin", |
|
"middle": [], |
|
"last": "Hayes", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kellie", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [], |
|
"last": "Iwaszuk", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Smriti", |
|
"middle": [], |
|
"last": "Jha", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jayant", |
|
"middle": [], |
|
"last": "Krishnamurthy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Theo", |
|
"middle": [], |
|
"last": "Lanman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Lintsbakh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Mcgovern", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Aleksandr", |
|
"middle": [], |
|
"last": "Nisnevich", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Pauls", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dmitrij", |
|
"middle": [], |
|
"last": "Petters", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Brent", |
|
"middle": [], |
|
"last": "Read", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Subhro", |
|
"middle": [], |
|
"last": "Roy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Rusak", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Beth", |
|
"middle": [], |
|
"last": "Short", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Div", |
|
"middle": [], |
|
"last": "Slomin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Snyder", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Stephon", |
|
"middle": [], |
|
"last": "Striplin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Tellman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Andrei", |
|
"middle": [], |
|
"last": "Vorobev", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Izabela", |
|
"middle": [], |
|
"last": "Witoszko", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Wolfe", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Abby", |
|
"middle": [], |
|
"last": "Wray", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yuchen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Zotov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Semantic Machines", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We describe an approach to task-oriented dialogue in which dialogue state is represented as a dataflow graph. A dialogue agent maps each user utterance to a program that extends this graph. Programs include metacomputation operators for reference and revision that reuse dataflow fragments from previous turns. Our graph-based state enables the expression and manipulation of complex user intents, and explicit metacomputation makes these intents easier for learned models to predict. We introduce a new dataset, SMCalFlow, featuring complex dialogues about events, weather, places, and people. Experiments show that dataflow graphs and metacomputation substantially improve representability and predictability in these natural dialogues. Additional experiments on the MultiWOZ dataset show that our dataflow representation enables an otherwise off-the-shelf sequence-tosequence model to match the best existing task-specific state tracking model. The SM-CalFlow dataset, code for replicating experiments, and a public leaderboard are available at https://www.microsoft.com/enus/research/project/dataflowbased-dialogue-semantic-machines.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We describe an approach to task-oriented dialogue in which dialogue state is represented as a dataflow graph. A dialogue agent maps each user utterance to a program that extends this graph. Programs include metacomputation operators for reference and revision that reuse dataflow fragments from previous turns. Our graph-based state enables the expression and manipulation of complex user intents, and explicit metacomputation makes these intents easier for learned models to predict. We introduce a new dataset, SMCalFlow, featuring complex dialogues about events, weather, places, and people. Experiments show that dataflow graphs and metacomputation substantially improve representability and predictability in these natural dialogues. Additional experiments on the MultiWOZ dataset show that our dataflow representation enables an otherwise off-the-shelf sequence-tosequence model to match the best existing task-specific state tracking model. The SM-CalFlow dataset, code for replicating experiments, and a public leaderboard are available at https://www.microsoft.com/enus/research/project/dataflowbased-dialogue-semantic-machines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Two central design decisions in modern conversational AI systems are the choices of state and action representations, which determine the scope of possible user requests and agent behaviors. Dialogue systems with fixed symbolic state representations (like slot filling systems) are easy to train but hard to extend (Pieraccini et al., 1992) . Deep continuous state representations are flexible enough to represent arbitrary properties of the dialogue history, but so unconstrained that training a User: Where is my meeting at 2 this afternoon?", |
|
"cite_spans": [ |
|
{ |
|
"start": 315, |
|
"end": 340, |
|
"text": "(Pieraccini et al., 1992)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "[Figure 1: an example dialogue (\"Where is my meeting at 2 this afternoon?\", \"Can you create a meeting with Megan right before that starts?\", \"Megan Bowen.\"), shown together with the program each user utterance is translated into and the dataflow graph those programs build.]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},
|
{ |
|
"text": "place(findEvent(EventSpec(start=pm(2)))) createEvent (EventSpec( end=start(refer(Constraint[Event] ())), attendee=PersonSpec(name='Megan') )) revise( new=PersonSpec(name='Megan Bowen'), oldLoc=Constraint[PersonSpec](), rootLoc=RoleConstraint(output) ) pm neural dialogue policy \"end-to-end\" fails to learn appropriate latent states (Bordes et al., 2016) . This paper introduces a new framework for dialogue modeling that aims to combine the strengths of both approaches: structured enough to enable efficient learning, yet flexible enough to support open-ended, compositional user goals that involve multiple tasks and domains. The framework has two components: a new state representation in which dialogue states are represented as dataflow graphs; and a new agent architecture in which dialogue agents predict compositional programs that extend these graphs. Over the course of a dialogue, a growing dataflow graph serves as a record of common ground: an executable description of the entities that were mentioned and the actions and computations that produced them ( Figure 1 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 98, |
|
"text": "(EventSpec( end=start(refer(Constraint[Event]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 353, |
|
"text": "(Bordes et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1070, |
|
"end": 1078, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While this paper mostly focuses on representational questions, learning is a central motivation for our approach. Learning to interpret naturallanguage requests is simpler when they are understood to specify graph-building operations. Human speakers avoid repeating themselves in conversation by using anaphora, ellipsis, and bridging to build on shared context (Mitkov, 2014) . Our framework treats these constructions by translating them into explicit metacomputation operators for reference and revision, which directly retrieve fragments of the dataflow graph that represents the shared dialogue state. This approach borrows from corresponding ideas in the literature on program transformation (Visser, 2001) and results in compact, predictable programs whose structure closely mirrors user utterances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 362, |
|
"end": 376, |
|
"text": "(Mitkov, 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 698, |
|
"end": 712, |
|
"text": "(Visser, 2001)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Experiments show that our rich dialogue state representation makes it possible to build better dialogue agents for challenging tasks. First, we release a newly collected dataset of around 40K natural dialogues in English about calendars, locations, people, and weather-the largest goaloriented dialogue dataset to date. Each dialogue turn is annotated with a program implementing the user request. Many turns involve more challenging predictions than traditional slot-filling, with compositional actions, cross-domain interaction, complex anaphora, and exception handling (Figure 2) . On this dataset, explicit reference mechanisms reduce the error rate of a seq2seq-withcopying model (See et al., 2017) by 5.9% on all turns and by 10.9% on turns with a cross-turn reference. To demonstrate breadth of applica-bility, we additionally describe how to automatically convert the simpler MultiWOZ dataset into a dataflow representation. This representation again enables a basic seq2seq model to outperform a state-of-the-art, task-specific model at traditional state tracking. Our results show that within the dataflow framework, a broad range of agent behaviors are both representable and learnable, and that explicit abstractions for reference and revision are the keys to effective modeling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 572, |
|
"end": 582, |
|
"text": "(Figure 2)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 685, |
|
"end": 703, |
|
"text": "(See et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This section provides a high-level overview of our dialogue modeling framework, introducing the main components of the approach. Sections 3-5 refine this picture, describing the implementation and use of specific metacomputation operators.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We model a dialogue between a (human) user and an (automated) agent as an interactive programming task where the human and computer communicate using natural language. Dialogue state is represented with a dataflow graph. At each turn, the agent's goal is to translate the most recent user utterance into a program. Predicted programs nondestructively extend the dataflow graph, construct any newly requested values or real-world side-effects, and finally describe the results to the user. Our approach is significantly different from a conventional dialogue system pipeline, which has separate modules for language understanding, dialogue state tracking, and dialogue policy execution (Young et al., 2013) . Instead, a single learned model directly predicts executable agent actions and logs them in a graphical dialogue state.", |
|
"cite_spans": [ |
|
{ |
|
"start": 685, |
|
"end": 705, |
|
"text": "(Young et al., 2013)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
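As a concrete illustration of this turn-level pipeline, the sketch below (Python; the function names and signatures are hypothetical, not an interface defined by the paper) shows the single predict-extend-evaluate-respond loop that replaces the traditional understanding / state tracking / policy pipeline.

from typing import Callable, List

# Hypothetical stand-ins: a Program is whatever the learned parser predicts, and
# the dialogue state is the growing list of dataflow nodes built by past turns.
Program = object
DataflowGraph = List[object]

def run_turn(graph: DataflowGraph,
             utterance: str,
             parse: Callable[[str, DataflowGraph], Program],
             extend: Callable[[DataflowGraph, Program], None],
             evaluate: Callable[[DataflowGraph], None],
             generate: Callable[[DataflowGraph], str]) -> str:
    """One user turn: a single learned model predicts an executable program,
    the program non-destructively extends the graph, evaluation runs the new
    nodes, and the response describes the result."""
    program = parse(utterance, graph)   # contextual semantic parsing
    extend(graph, program)              # the graph is the record of common ground
    evaluate(graph)                     # construct requested values / real-world side effects
    return generate(graph)              # natural-language description of the result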
|
{ |
|
"text": "Programs, graphs, and evaluation The simplest example of interactive program synthesis is question answering:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "User:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "When is the next retreat?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "start(findEvent(EventSpec( name= retreat , start=after(now()))))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Agent: It starts on April 27 at 9 am.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Here the agent predicts a program that invokes an API call (findEvent) on a structured input (EventSpec) to produce the desired query. 1 This is a form of semantic parsing (Zelle, 1995) . The program predicted above can be rendered as a dataflow graph: Each function call in the program corresponds to a node labeled with that function. This node's parents correspond to the arguments of the function call. The top-level call that returns the program's result is depicted with a solid border. A dataflow graph is always acyclic, but is not necessarily a tree, as nodes may be reused.", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 185, |
|
"text": "(Zelle, 1995)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Once nodes are added to a dataflow graph, they are evaluated in topological order. Evaluating a node applies its function to its parents' values:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "DateTime(2020, . . .) findEvent 'retreat' now start start name after EventSpec DateTimeSpec(. . .)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Here we have annotated two nodes to show that the value of after(now()) is a DateTimeSpec and the value of the returned start node is a specific DateTime. Evaluated nodes are shaded in our diagrams. Exceptions (see \u00a75) block evaluation, leaving downstream nodes unevaluated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The above diagram saves space by summarizing the (structured) value of a node as a string. In reality, each evaluated node has a dashed result edge that points to the result of evaluating it: That result is itself a node in the dataflow graphoften a new node added by evaluation. It may have its own result edge. 2 A node's value is found by transitively following result edges until we arrive its hearer). The fact that next in this context triggered a search for \"events after now\" was learned from annotations. See \u00a76 for a discussion of how these annotations are standardized in the SMCalFlow dataset. 2 In other words, a function does not have to return a terminal node. Its result may be an existing node, as we will see in \u00a73. Or it may be a new non-terminal node, i.e., the root of a subgraph that implements the function. The new nodes in the subgraph are then evaluated further, giving them result edges, although they also remain available for reference and revision. Of course, a library function such as findEvent or at a node whose result is itself. Such a terminal node is either a primitive value (e.g., 2020), or a constructor (e.g., DateTime) whose parent nodes' values specify its arguments. A constructor has the same (capitalized) name as the type it constructs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 606, |
|
"end": 607, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview: Dialogue and Dataflow", |
|
"sec_num": "2" |
|
}, |
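The evaluation behavior described in this section (arguments as parent nodes, evaluation in topological order, result edges followed transitively to a terminal node) can be sketched as follows. This is an illustrative reconstruction in Python, not the paper's implementation, and the library of functions is assumed.

from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional

@dataclass
class Node:
    fn: str                                              # function or constructor name
    args: List["Node"] = field(default_factory=list)     # parent nodes (the call's arguments)
    result: Optional["Node"] = None                      # "result edge"; a terminal node points to itself
    payload: object = None                               # primitive value carried by a terminal node

def terminal(value: object) -> Node:
    """A terminal node: its result edge points to itself."""
    node = Node(fn="value", payload=value)
    node.result = node
    return node

def value_of(node: Node) -> object:
    """Follow result edges transitively until reaching a node whose result is itself."""
    while node.result is not node:
        node = node.result
    return node.payload

def evaluate(graph: List[Node], library: Dict[str, Callable]) -> None:
    """Evaluate unevaluated nodes; the list is assumed to be in topological order
    (each node appears after its parents), as it is when programs are appended."""
    for node in list(graph):
        if node.result is None:
            node.result = terminal(library[node.fn](*[value_of(a) for a in node.args]))
            graph.append(node.result)                    # the result is itself a node in the graph

For example, with library = {'now': lambda: '2020-04-25', 'after': lambda t: ('after', t)}, building now = Node('now') and after = Node('after', [now]) and calling evaluate([now, after], library) leaves value_of(after) == ('after', '2020-04-25'). A real result may be a whole subgraph rather than a terminal node, as footnote 2 above notes; that case is omitted here.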
|
{ |
|
"text": "We now sketch two metacomputation functions whose evaluation extends the graph in complex ways.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As a representation of dialogue state, the dataflow graph records entities that have been mentioned and the relationships among them. All nodes in the dataflow graph are eligible to be referenced by subsequent utterances. Suppose, for example, that the user continues the previous dialogue fragment with a follow-up question:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "User:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "What day of the week is that?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "dayOfWeek(refer())", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: It's a Monday.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The user's word that becomes the refer call in our predicted program, as it is a reference to \"some salient previously mentioned node. + that invokes an API will generally return its value directly as a terminal node. However, translating natural language to higher-level function calls, which have been defined to expand into lower-level library calls (reminiscent of macro expansion), is often more easily learnable and more maintainable than translating it directly to the expanded graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A \"new\" DateTimeSpec (representing in 2021) is to be substituted for some salient existing old node that has value type DateTimeSpec (in this case, the node after(now())). As in the refer example, the target program (though not the above subgraph) corresponds closely to the user's new utterance, making it easy to predict. Like the utterance itself, the program does not specify the revised subgraph in full, but describes how to find and reuse relevant structure from the previous dataflow graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Given a dataset of turns expressed in terms of appropriate graph-manipulation programs, the learning problem for a dataflow agent is the same as for any other supervised contextual semantic parser. We want to learn a function that maps user utterances to particular programs-a well-studied task for which standard models exist. Details of the model used for our experiments in this paper are provided in \u00a77.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Aside: response generation This paper focuses on language understanding: mapping from a user's natural language utterance to a formal response, in this case the value of the outlined node returned by a program. Dialogue systems must also perform language generation: mapping from this formal response to a natural-language response. The dataset released with this paper includes output from a learned generation model that can describe the value computed at a previous turn, describe the structure of the computation that produced the value, and reference other nodes in the dataflow graph via referring expressions. Support for structured, computation-conditional generation models is another advantage of dataflowbased dialogue state representations. While a complete description of dataflow-based language generation is beyond the scope of this paper, we briefly describe the components of the generation system relevant to the understanding system presented here.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The generation model is invoked after the evaluation phase. It conditions on a view of the graph rooted at the most recent return node, so generated responses can mention both the previously returned value and the computation that produced it. As the generation model produces the natural language response, it extends the dataflow graph. For example, if after the user query \"What's the date of the next retreat?\" the agent responds:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: It starts on April 27 at 9 am, and runs for 8 hours.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "then it will also extend the dataflow graph to reflect that the event's duration was mentioned: The duration of the event is now part of the common ground in the conversation and available for future reference by either the agent or the user. The generation model is also important for agent initiative:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "User:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Put an event on my calendar.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: What should it be called?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As discussed in detail in \u00a75, questions of this kind can be generated in response to exceptions generated by underspecified user requests. In the accompanying dataset release, the agent's utterances are annotated with their dataflow graphs as extended by the generation model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference and revision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In a dialogue, entities that have been introduced once may be referred to again. In dataflow dialogues, the entities available for reference are given by the nodes in the dataflow graph. Entities are salient to conversation participants to different degrees, and their relative salience determines the ways in which they may be referenced (Lappin and Leass, 1994) . For example, it generally refers to the most salient non-human entity, while more specific expressions like the Friday meeting are needed to refer to accessible but less salient entities. Not all references to entities are overt: if the agent says \"You have a meeting tomorrow\" and the user responds \"What time?\", the agent must predict the implicit reference to a salient event.", |
|
"cite_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 363, |
|
"text": "(Lappin and Leass, 1994)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference resolution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We have seen that refer is used to find referents for referring expressions. In general, these referents may be existing dataflow nodes or new subgraphs for newly mentioned entities. We now give more detail about both cases. Imagine a dialogue in which the dataflow graph contains the following fragment (which translates a mention of Easter or answers When is Easter?): Suppose the user subsequently mentions the day after that. We wish to produce this computation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Days 1 + findDateTime easter", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our framework, this is accomplished by mapping the day after that to +(refer(), Days(1)). The corresponding graph is not quite the one shown above, but it evaluates to the same value: This shows how the refer() call is reified as a node in the dataflow graph. Its result is the salient findDateTime node from the previous turnwhose own result, a specific DateTime, now serves as the value of refer. We show both result edges here. Evaluating + adds a day to this old DateTime value to get the result of +, a new DateTime.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To enable dataflow graph manipulation with referring expressions, all that is required is an implementation of refer that can produce appropriate pointers for both simple references (that) and complex ones (the first meeting).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Constraints A call to refer is essentially a query that retrieves a node from the dialogue history, using a salience model discussed below. refer takes an optional argument: a constraint on the returned node. Indeed, the proper translation of that in the context the day after that would be refer ( \u2022 Role constraints: A role constraint specifies a keyword and matches nodes that are used as keyword arguments with that keyword. For example, the month maps to refer(RoleConstraint(month)) and resolves to the constant node apr in the dialogues in \u00a72, since that node was used as a named argument month=apr. We further allow keyword paths that select arguments of arguments: thurs in the previous bullet would satisfy", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "refer(RoleConstraint([date,weekday])).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To interpret a natural language referring expression, the program prediction model only needs to translate it into a contextually appropriate constraint C. refer(C) is then evaluated using a separate salience retrieval model that returns an appropriate node. The following dialogue shows referring expressions in action:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "User:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "What's happening this morning?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "findEvent(EventSpec( start=and(today(), during(morning())))))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: You have a planning meeting at 9 am.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataflow pointers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "What do I have after that?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "findEvent(EventSpec(start=after(end( refer(Constraint[Event]())))))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: Your birthday lunch is at 12:30 pm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "User: How many people are coming to the 9 am meeting? length(attendees( refer(Constraint[Event](start=am(9))))) Agent: 5 people will be at the planning meeting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here the 9 am meeting refers to the one that is salient from the first response, not an arbitrary one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Salience retrieval model The salience retrieval model returns the most salient node satisfying the underlying constraint. Our dataflow framework is agnostic to the implementation of this model. A sophisticated model could select nodes via a machine-learned scoring function. In the experiments in this paper, however, we rank nodes using a hard-coded heuristic. The heuristic chooses the root node r of the previous user utterance, if it satisfies the constraint. More generally, the heuristic prefers nodes to the extent that they can be reached from r in a small number of steps, where a step may move from a node to one of its input nodes, from an evaluated node to its result node, or from the root of an utterance to the root of an adjacent (user or system) utterance. If no satisfying node is found in the past several utterances, the heuristic falls back to generating code (see footnote 2) that will search harder for a satisfying salient entity, perhaps by querying a database. For example, our earlier Constraint[Event](start=am(9)) may return the expression findEvent(EventSpec(start=am(9))) if no 9 am meeting has been mentioned recently, and Constraint[Person](name='Adam') may return findPerson(PersonSpec(name='Adam')) if no Adam has been mentioned. (See footnote 4.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
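A minimal sketch of the hard-coded heuristic just described: breadth-first search outward from the root of the previous utterance, where a step may follow an input edge, a result edge, or move to the root of the adjacent (earlier) utterance, returning the first node that satisfies the constraint. The Node fields, the constraint-as-predicate encoding, and the bare None fallback are simplifying assumptions; the fallback that generates a new search computation (footnote 2) is not shown.

from collections import deque
from dataclasses import dataclass, field
from typing import Callable, List, Optional

@dataclass
class Node:
    type_name: str                                       # type of the node's value, e.g. "Event"
    args: List["Node"] = field(default_factory=list)     # input (parent) nodes
    result: Optional["Node"] = None                      # result node, if already evaluated

# A constraint is modeled as a predicate on nodes; Constraint[Event]() from the
# dialogue above would be, e.g., lambda n: n.type_name == "Event".
Constraint = Callable[[Node], bool]

def refer(constraint: Constraint, utterance_roots: List[Node]) -> Optional[Node]:
    """Return the most salient node satisfying the constraint, preferring nodes
    reachable from the previous utterance's root in few steps."""
    if not utterance_roots:
        return None
    root_index = len(utterance_roots) - 1                # previous (most recent) utterance
    start = utterance_roots[root_index]
    queue, seen = deque([start]), {id(start)}
    while queue:
        node = queue.popleft()
        if constraint(node):
            return node
        neighbors = list(node.args)                      # step to an input node
        if node.result is not None:
            neighbors.append(node.result)                # step to a result node
        if node is utterance_roots[root_index] and root_index > 0:
            root_index -= 1                              # step to the adjacent utterance's root
            neighbors.append(utterance_roots[root_index])
        for nxt in neighbors:
            if id(nxt) not in seen:
                seen.add(id(nxt))
                queue.append(nxt)
    return None   # no salient match: fall back to constructing a search such as findEvent(...)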
|
{ |
|
"text": "Beyond referring to previously mentioned entities (nodes), task-oriented dialogues frequently refer to previously executed computations (subgraphs). This is one of the major advantages of representing the dialogue state as a dataflow graph of computations, not just a set of potentially salient entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Revision", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "What time on Tuesday is my planning meeting? start(findEvent(EventSpec( name= planning , start=DateTimeSpec(weekday=tuesday))))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: You meet with Grace at noon.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Sorry, I meant all-hands.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: Your all-hands meeting is at 2:30 pm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The second user utterance asks for the computation from the first user utterance to be repeated, but with all-hands in place of planning. The expected result is still a time, even though the second utterance makes no mention of time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the dataflow framework, we invoke a revise operator to construct the revised computation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "User: Sorry, I meant all-hands.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "revise(rootLoc=RoleConstraint(output), oldLoc=Constraint[String](), new= all-hands )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Again, the content of the program closely reflects that of the corresponding utterance. The revise operator takes three arguments:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 rootLoc, a constraint to find the top-level node of the original computation; \u2022 oldLoc, a constraint on the node to replace within the original computation; \u2022 new, a new graph fragment to substitute there.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The revise node evaluates to the root of a modified copy of the original computation, in which new now fills the role at the \"old\" location. Normally rootLoc is RoleConstraint(output), which selects a node returned by a previous user utterance; thus, we revise that entire original request.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Revision is non-destructive-no part of the dialogue history is lost, so entities computed by the original target and its ancestors remain available for later reference. However, the copy shares nodes with the original computation where possible, to avoid introducing unnecessary duplicate nodes that would have to be considered by refer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
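The substitution performed when a revise node is evaluated can be sketched as a non-destructive rewrite: find the node matching oldLoc beneath the node matching rootLoc, then rebuild only the path from that location up to the root, sharing every unchanged node with the original graph. As before, the Node shape and constraint-as-predicate encoding are simplifications; resolving rootLoc and oldLoc through the salience model is elided.

from dataclasses import dataclass, field
from typing import Callable, List, Optional

@dataclass
class Node:
    fn: str
    args: List["Node"] = field(default_factory=list)

Constraint = Callable[[Node], bool]

def find(root: Node, constraint: Constraint) -> Optional[Node]:
    """Depth-first search for a node under `root` satisfying the constraint."""
    if constraint(root):
        return root
    for arg in root.args:
        hit = find(arg, constraint)
        if hit is not None:
            return hit
    return None

def substitute(root: Node, old: Node, new: Node) -> Node:
    """Copy of `root` with `old` replaced by `new`; untouched subtrees are shared."""
    if root is old:
        return new
    new_args = [substitute(a, old, new) for a in root.args]
    if all(na is a for na, a in zip(new_args, root.args)):
        return root                                      # nothing below changed: reuse the node
    return Node(root.fn, new_args)

def revise(root_loc: Node, old_loc: Constraint, new: Node) -> Node:
    """Evaluate a revise call: the result is the root of the revised computation."""
    old = find(root_loc, old_loc)
    if old is None:
        raise LookupError("no node satisfying oldLoc under the chosen root")
    return substitute(root_loc, old, new)

In the all-hands example, root_loc would be the start node returned by the first turn, old_loc would accept string-valued nodes, and new would be a node for 'all-hands'; the revised start node is returned while unchanged pieces (such as the Tuesday constraint) are shared with the original subgraph.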
|
{ |
|
"text": "For the example dialogue at the beginning of this section, the first turn produces the light gray nodes below. The second turn adds the darker gray nodes, which specify the desired revision. Finally, evaluating the revise node selects the salient locations that match the rootLoc and oldLoc constraints (indicated in the above drawing by temporary dotted lines), and constructs the revised subgraph (the new start node below and its ancestors). The result of evaluation (dashed arrow below) is the root of the revised subgraph. Finally, evaluating these new nodes as well will establish that the value of the top-level revise is the start time of the 'all-hands' meeting on Tuesday.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the following example, the second utterance asks to replace a date specification. However, the utterance appears in a context where the relevant DateTimeSpec-valued node to be \"replaced\" is an argument that has actually not yet been provided:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "User:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "When is lunch?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "start(findEvent(EventSpec(name= lunch )))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: It's at 1 pm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "What about tomorrow?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "revise( rootLoc=RoleConstraint(output), oldLoc=Constraint[DateTimeSpec], new=tomorrow())", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The revision replaces the missing start argument to the previous EventSpec (whose absence had resulted in some default behavior) with an explicit argument (the DateTimeSpec returned by tomorrow()). To achieve this, when the salience retrieval model is run with an oldLoc constraint, it must be able to return missing arguments that satisfy that constraint. Missing arguments are implicitly present, with special value missing of the appropriate type. In practice they are created on demand. Relatedly, a user utterance sometimes modifies a previously mentioned constraint such as an EventSpec (see footnote 4). To permit this and more, we allow a more flexible version of revise to (non-destructively) transform the subgraph at oldLoc by applying a function, rather than by substituting a given subgraph new. Such functions are similar to rewrite rules in a term rewriting system (Klop, 1990) , with the oldLoc argument supplying the condition. Our dataset ( \u00a76) makes heavy use of reviseConstraint calls, which modify a constraint as directed, while weakening it if necessary so that it remains satisfiable. For example, if a 3:00-3:30 meeting is onscreen and the user says make it 45 minutes or make it longer, then the agent can no longer preserve previous constraints start=3:00 and end=3:30; one must be dropped.", |
|
"cite_spans": [ |
|
{ |
|
"start": 880, |
|
"end": 892, |
|
"text": "(Klop, 1990)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
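reviseConstraint can be pictured with the same machinery, except that the old constraint is transformed by a function rather than replaced outright. The sketch below models a constraint as a slot-to-value map and uses an invented rule (of start, end, and duration, at most two may be pinned) to reproduce the weakening behavior in the 3:00-3:30 example; the slot names and the conflict rule are illustrative assumptions, not the dataset's actual logic.

from typing import Dict, FrozenSet, List

# A constraint modeled as a slot -> value mapping, e.g. {"start": "3:00", "end": "3:30"}.
ConstraintDict = Dict[str, object]

# Hypothetical domain knowledge: these slots jointly over-constrain an event.
OVERCONSTRAINED: List[FrozenSet[str]] = [frozenset({"start", "end", "duration"})]

def revise_constraint(old: ConstraintDict, update: ConstraintDict) -> ConstraintDict:
    """Apply the requested update, weakening the old constraint so it stays satisfiable."""
    revised = dict(old)
    revised.update(update)                               # newly requested slots always win
    for group in OVERCONSTRAINED:
        pinned = [s for s in group if s in revised]
        while len(pinned) > 2:                           # too many members pinned at once
            droppable = [s for s in pinned if s not in update]
            if not droppable:
                break                                    # the update itself over-constrains; leave it
            del revised[droppable[0]]                    # drop an old slot, never a requested one
            pinned.remove(droppable[0])
    return revised

# "Make it 45 minutes": start=3:00 and end=3:30 can no longer both be kept.
print(revise_constraint({"start": "3:00", "end": "3:30"}, {"duration": "45 minutes"}))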
|
{ |
|
"text": "While the examples in this section involve a single update, real-world dialogues ( \u00a76) can involve single user requests built up over as many as five turns with unrelated intervening discussion. Revisions of revisions or of constraints on reference are also seamlessly handled: revise can take another revise or a refer node as its target, leading to a longer chain of result edges (dashed lines) to follow. Coordination of interactions among this many long-range dependencies remains a challenge even for modern attentional architectures (Bordes et al., 2016) . With revise all the needed information is in one place; as experiments will show, this is crucial for good performance in more challenging dialogues.", |
|
"cite_spans": [ |
|
{ |
|
"start": 539, |
|
"end": 560, |
|
"text": "(Bordes et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Sometimes users make requests that can be fulfilled only with the help of followup exchanges, if at all. Requests might be incomplete:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recovery", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "User:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recovery", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Book a meeting for me.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recovery", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Agent: When should the meeting start?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recovery", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "referentially ambiguous:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recovery", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "User: Who is coming to the planning meeting?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recovery", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Agent: Susan Chu and Susan Brown.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recovery", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "What is Susan's email?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "or have no identifiable referent (a presupposition failure):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "User: When is my first meeting on February 30?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our solution is to treat such discourse failures as exceptions. In principle, they are no different from other real-world obstacles to fulfilling the user's request (server errors, declined credit cards, and other business logic). To be useful, a dialogue model must have some way to recover from all these exceptions, describing the problem to the user and guiding the dialogue past it. Our dialogue manager consists mainly of an exception recovery mechanism. This contrasts with traditional slot-filling systems, where a scripted policy determines which questions to ask the user and in which order. Scripted policies are straightforward but cannot handle novel compositional utterances. Contextual semantic parsers treat compositionality, but provide no dialogue management mechanism at all. Our dataflow-based approach allows the user to express complex compositional intents, but also allows the agent to reclaim the initiative when it is unable to make progress. Specifically, the agent can elicit interactive repairs of the problematic user plan: the user communicates such repairs through the reference and revision mechanisms described in preceding sections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Exceptions in execution In the dataflow graph framework, failure to interpret a user utterance is signaled by exceptions, which occur during evaluation. The simplest exceptions result from errors in function calls and constructors: An exception is essentially just a special result (possibly a structured value) returned by evaluation. It appears in the dataflow graph, so the agent can condition on it when predicting programs in future turns. When an exception occurs, the generation model ( \u00a72) is invoked on the exceptional node. This can be used to produce prompts like:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Agent: There is no 30th of February. Did you mean some other date?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "At this point, recovering from the exception looks like any other revision step: the user supplies a new value, and the agent simply needs to patch it into the right location in the dataflow graph. There are several answers the user could make, indicating repairs at different locations: The fact that exception recovery looks like any other turn-level prediction is another key advantage of dataflow-based state representations. In the above examples, the user specified a revision that would enable them to continue, but they also would have been free to try another utterance (List all my meetings in February) or to change goals altogether (Never mind, let's schedule a vacation).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
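Recording exceptions as ordinary results can be folded into the evaluator sketched earlier: a failing call stores the exception as the node's result instead of a value, and nodes whose inputs carry an exception are left unevaluated, so the failure stays in the graph for later turns (and the generation model) to condition on. The classes and the blocking rule below are illustrative, not taken from the paper's code.

from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional

@dataclass
class Node:
    fn: str
    args: List["Node"] = field(default_factory=list)
    result: object = None                                # a value, an Exception, or None (unevaluated)

def evaluate_with_exceptions(graph: List[Node], library: Dict[str, Callable]) -> None:
    """Evaluate nodes in order, recording failures in the graph instead of raising."""
    for node in graph:
        if node.result is not None:
            continue
        if any(a.result is None or isinstance(a.result, Exception) for a in node.args):
            continue                                     # blocked: an input failed or never ran
        try:
            node.result = library[node.fn](*[a.result for a in node.args])
        except Exception as err:                         # e.g. "there is no 30th of February"
            node.result = err

def latest_exception(graph: List[Node]) -> Optional[Exception]:
    """What the agent conditions on when generating a clarifying question."""
    errors = [n.result for n in graph if isinstance(n.result, Exception)]
    return errors[-1] if errors else None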
|
|
{ |
|
"text": "Because of its flexibility, our exceptionhandling mechanism is suitable for many situations that have not traditionally been regarded as exceptions. For example, an interactive slot-filling workflow can be achieved via a sequence of underspecified constructors, each triggering an exception and eliciting a revision from the user: The agent predicted that the user intended to revise the missing name because an exception involving the name path appeared in the dialogue history on the previous turn. Recovery behaviors are enabled by the phase separation between constructing the dataflow graph (which is the job of program synthesis from natural language) and evaluating its nodes. The dataflow graph always contains a record of the user's current goal, even when the goal could not be successfully evaluated. This goal persists across turns and remains accessible to reference, and thus can be interactively refined and clarified using the same metacomputation operations as user-initiated revision. Exception handling influences the course of the dialogue, without requiring a traditional hand-written or learned \"dialogue policy\" that reasons about full dialogue states. Our policy only needs to generate language (recall \u00a72) that reacts appropriately to any exception or exceptions in the evaluation of the most recent utterance's program, just as it reacts to the ordinary return value in the case where evaluation succeeds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User:", |
|
"sec_num": null |
|
}, |
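One way to picture this slot-filling-by-exception loop: an underspecified constructor raises an exception that records the path of the missing argument, and on the next turn the agent, seeing that exception in the history, predicts a revise targeting exactly that path. Everything named below (MissingArgument, create_event, build_revision, and the emitted program template) is hypothetical illustration rather than the dataset's actual annotation.

from typing import Dict, List, Optional

class MissingArgument(Exception):
    """Raised by an underspecified constructor; remembers which slot is missing."""
    def __init__(self, path: List[str]):
        super().__init__("missing argument: " + ".".join(path))
        self.path = path

def create_event(spec: Dict[str, object]) -> Dict[str, object]:
    # Illustrative constructor: an event must at least be given a name.
    if "name" not in spec:
        raise MissingArgument(["name"])
    return {"kind": "event", **spec}

def build_revision(last_error: Optional[Exception], user_answer: str) -> str:
    """Template the next-turn program from the pending exception (stand-in for the learned parser)."""
    if isinstance(last_error, MissingArgument):
        path = ", ".join(last_error.path)
        return ("revise(rootLoc=RoleConstraint(output), "
                f"oldLoc=RoleConstraint([{path}]), new='{user_answer}')")
    return f"createEvent(EventSpec(name='{user_answer}'))"

# Turn 1: "Put an event on my calendar."  ->  the constructor fails on the missing name.
try:
    create_event({})
    pending: Optional[Exception] = None
except MissingArgument as err:
    pending = err

# Agent: "What should it be called?"   User: "Planning meeting."
print(build_revision(pending, "Planning meeting"))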
|
{ |
|
"text": "To validate our approach, we crowdsourced a large English dialogue dataset, SMCalFlow, featuring task-oriented conversations about calendar events, weather, places, and people. Figure 2 has an example. SMCalFlow has several key characteristics:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 185, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Richly annotated: Agent responses are executable programs, featuring API calls, function composition, and complex constraints built from strings, numbers, dates and times in a variety of formats. They are not key-value structures or database queries, but instead full descriptions of the runtime behavior needed to react to the user in a real, grounded dialogue system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Open-ended: We did not constrain crowdworkers to scripts. Instead, they were given general information about agent capabilities and were encouraged to interact freely. A practical dialogue system must also recognize and respond to out-ofscope requests. Our dataset includes many such examples (see the fourth user turn in Figure 2 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 322, |
|
"end": 330, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Cross-domain: SMCalFlow spans four major domains: calendar, weather, places, and people. Cross-domain interaction is pervasive (Figure 2 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 136, |
|
"text": "(Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "To cover a rich set of back-end capabilities while encouraging worker creativity, we designed a wide range of scenarios to guide dialogue construction. There are over 100 scenarios of varying topic and granularity. Dialogues are collected via a Wizard-of-Oz process. Every dialogue is associated with a scenario. At each turn, a crowdworker acting as the user is presented with a dialogue as context and is asked to append a new utterance. An annotator acting as the agent labels the utterance with a program (which may include refer and revise) and then selects a natural-language response from a set of candidates produced by the language generation model described in \u00a72. Figure 2 : A sample annotated dialogue in SMCalFlow. Turn 1 features free-text subject and date/time. Turn 2 features reviseConstraint. Turn 3 features crossdomain interaction via refer and nested API calls (findPlace and weatherQueryApi are both real-world APIs). Turn 4 features an out-of-scope utterance that is parried by a category-appropriate \"fencing\" response. Turn 5 confirms a proposal after intervening turns.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 675, |
|
"end": 683, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "annotation interface includes an autocomplete feature based on existing annotations. Annotators also populate databases of people and events to ensure that user requests have appropriate responses. The process is iterated for a set number of turns or until the annotator indicates the end of conversation. A single dialogue may include turns from multiple crowdworkers and annotators. Annotators are provided with detailed guidelines containing example annotations and information about available library functions. Guidelines also specify conventions for pragmatic issues like the decision to annotate next as after at the beginning of \u00a72. Crowdworkers are recruited from Amazon Mechanical Turk with qualification requirements such as living in the United States and with a work approval rate higher than 95%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Data is split into training, development, and test sets. We review every dialogue in the test set with two additional annotators. 75% of turns pass through this double review process with no changes, which serves as an approximate measure of inter-annotator consensus on full programs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For comparison, we also produce a version of the popular MultiWOZ 2.1 dataset (Budzianowski et al., 2018; with dataflow-based annotations. MultiWOZ is a state tracking task, so in its original format the dataset annotates each turn with a dialogue state rather than an executable representation. To obtain an equivalent (programbased) representation for MultiWOZ, at each user turn we automatically convert the annotation to a dataflow program. 5 Specifically, we represent each non-empty dialogue state as a call to an event booking function, find, whose argument is a Constraint that specifies the desired type of booking along with values for some of that type's slots. Within a dialogue, any turn that initiates a new type of booking is re-annotated as a call to find. Turns that merely modify some of the slots are reannotated as reviseConstraint calls. Within either kind of call, any slot value that does not appear as a substring of the user's current utterance (all slot values in MultiWOZ are utterance substrings) is re-annotated as a call to refer with an appropriate type constraint, provided that the reference resolution heuristic would retrieve the correct string from earlier in the dataflow. This covers references like the same day. Otherwise, our re-annotation retains the literal string value.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 105, |
|
"text": "(Budzianowski et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
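
{

"text": "The conversion heuristic above can be sketched in a few lines of Python. The program constructors (find, reviseConstraint, refer) follow the text, but the helper names, the flat slot-dictionary state format, and the omission of the check that the reference-resolution heuristic actually retrieves the correct string are assumptions of this sketch, not the released conversion script.\n\ndef booking_type(state):\n    # MultiWOZ slots look like hotel-area or restaurant-food; the prefix of any\n    # slot identifies the type of booking currently being discussed.\n    return next(iter(state)).split(\"-\")[0] if state else None\n\ndef convert_turn(prev_state, state, utterance):\n    changed = {s: v for s, v in state.items() if prev_state.get(s) != v}\n    args = {}\n    for slot, val in changed.items():\n        if val.lower() in utterance.lower():\n            args[slot] = val              # literal string from the current utterance\n        else:\n            args[slot] = (\"refer\", slot)  # cross-turn reference, e.g. the same day\n    constraint = (\"Constraint\", booking_type(state), args)\n    if booking_type(state) != booking_type(prev_state):\n        return (\"find\", constraint)       # a new type of booking starts this turn\n    return (\"reviseConstraint\", constraint)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data",

"sec_num": "6"

},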
|
{ |
|
"text": "Data statistics are shown in Table 1 . To the best of our knowledge, SMCalFlow is the largest annotated task-oriented dialogue dataset to date. Compared to MultiWOZ, it features a larger user vocabulary, a more complex space of statemanipulation primitives, and a long tail of agent programs built from numerous function calls and deep composition.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 36, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We evaluate our approach on SMCalFlow and MultiWOZ 2.1. All experiments use the Open- 5 We release the conversion script along with SMCalFlow.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 87, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "NMT (Klein et al., 2017) pointer-generator network (See et al., 2017) , a sequence-to-sequence model that can copy tokens from the source sequence while decoding. Our goal is to demonstrate that dataflow-based representations benefit standard neural model architectures. Dataflowspecific modeling might improve on this baseline, and we leave this as a challenge for future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 24, |
|
"text": "(Klein et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 51, |
|
"end": 69, |
|
"text": "(See et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "For each user turn i, we linearize the target program into a sequence of tokens z i . This must be predicted from the dialogue contextnamely the concatenated source sequence", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "x i\u2212c z i\u2212c \u2022 \u2022 \u2022 x i\u22121 z i\u22121 x i (for SMCalFlow) or x i\u2212c y i\u2212c \u2022 \u2022 \u2022 x i\u22121 y i\u22121 x i (for MultiWOZ 2.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Here c is a context window size, x j is the user utterance at user turn j, y j is the agent's naturallanguage response, and z j is the linearized agent program. Each sequence x j , y j , or z j begins with a separator token that indicates the speaker (user or agent). Our formulation of context for Multi-WOZ is standard (e.g., Wu et al., 2019) . We take the source and target vocabularies to consist of all words that occur in (respectively) the source and target sequences in training data, as just defined.", |
|
"cite_spans": [ |
|
{ |
|
"start": 328, |
|
"end": 344, |
|
"text": "Wu et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "7" |
|
}, |
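
{

"text": "A small sketch of how this source sequence might be assembled is shown below; the separator tokens __User and __Agent are placeholders for whatever speaker-indicating tokens are actually used, and the per-turn tuple layout is an assumption of the sketch.\n\ndef build_source(turns, i, c, use_programs=True):\n    # turns[j] = (user_tokens x_j, agent_response_tokens y_j, program_tokens z_j)\n    src = []\n    for j in range(max(0, i - c), i):\n        x_j, y_j, z_j = turns[j]\n        src += [\"__User\"] + x_j\n        src += [\"__Agent\"] + (z_j if use_programs else y_j)  # programs for SMCalFlow, responses for MultiWOZ\n    return src + [\"__User\"] + turns[i][0]  # the current utterance x_i ends the input",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "7"

},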
|
{ |
|
"text": "The model is trained using the Adam optimizer (Kingma and Ba, 2015) with the maximum likelihood objective. We use 0.001 as the learning rate. Training ends when there have been two different epochs that increased the development loss.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 67, |
|
"text": "(Kingma and Ba, 2015)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "7" |
|
}, |
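
{

"text": "A sketch of this training regime is given below, assuming a PyTorch-style model whose neg_log_likelihood method (a placeholder name) returns the per-batch loss; in practice OpenNMT manages this loop internally.\n\nimport torch\n\ndef train(model, train_batches, dev_batches, max_epochs=50):\n    opt = torch.optim.Adam(model.parameters(), lr=1e-3)\n    prev_dev, bad_epochs = float(\"inf\"), 0\n    for _ in range(max_epochs):\n        model.train()\n        for batch in train_batches:\n            opt.zero_grad()\n            loss = model.neg_log_likelihood(batch)  # maximum-likelihood objective\n            loss.backward()\n            opt.step()\n        model.eval()\n        with torch.no_grad():\n            dev_loss = sum(model.neg_log_likelihood(b).item() for b in dev_batches)\n        if dev_loss > prev_dev:\n            bad_epochs += 1\n            if bad_epochs == 2:  # two epochs have increased the dev loss: stop\n                break\n        prev_dev = dev_loss\n    return model",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "7"

},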
|
{ |
|
"text": "We use Glove800B-300d (cased) and Glove6B-300d (uncased) (Pennington et al., 2014) to initialize the vocabulary embeddings for the SM-CalFlow and MultiWoZ experiments, respectively. The context window size c, hidden layer size d, number of hidden layers l, and dropout rates r are selected based on the agent action accuracy (for SMCalFlow) or dialogue-level exact match (for MultiWoZ) on the development set from {2, 4, 10}, {256, 300, 320, 384}, {1, 2, 3}, {0.3, 0.5, 0.7} respectively. Approximate 1-best decoding uses a beam of size 5. Table 2 shows results for the SMCalFlow dataset. We report program accuracy: specifically, exact-match accuracy of the predicted program after inlining metacomputation (i.e., replacing all calls to metacomputation operations with the concrete program fragments they return). 6 We also compare to baseline mod- 1: Dataset statistics. \"Library Size\" counts distinct function names (e.g., findEvent) plus keyword names (e.g., start=). \"Length\" and \"Depth\" columns show (.25, .50, .75) quantiles. For programs, \"Length\" is the number of function calls and \"Depth\" is determined from a tree-based program representation. \"OOS\" counts the outof-scope utterances. MultiWOZ statistics were calculated after applying the data processing of Wu et al. (2019) . Vocabulary size is less than reported by els that train on inlined metacomputation. These experiments make it possible to evaluate the importance of explicit dataflow manipulation compared to a standard contextual semantic parsing approach to the task: a no-metacomputation baseline can still reuse computations from previous turns via the model's copy mechanism. For the full representation, c, d, l, and r are 2, 384, 2, and 0.5, respectively. For the inline variant, they are 2, 384, 3, and 0.5. Turn-level exact match accuracy is around 73% for the development set and 67% for the test set. Inlining metacomputation, which forces the model to explicitly resolve cross-turn computation, reduces accuracy by 5.9% overall, 10.9% on turns involving references, and 9.1% on turns involving revision. Dataflow-based metacomputation operations are thus essential for good model performance in all three cases.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 82, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 815, |
|
"end": 816, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1021, |
|
"text": "(.25, .50, .75)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1271, |
|
"end": 1287, |
|
"text": "Wu et al. (2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 540, |
|
"end": 547, |
|
"text": "Table 2", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "7" |
|
}, |
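
{

"text": "A minimal sketch of this metric follows, assuming programs are represented as nested tuples whose first element names the function, and that resolve() stands in for the (unreleased) salience model mapping a refer or revise call to the concrete fragment it returns in context.\n\ndef inline(program, resolve):\n    # Replace metacomputation calls with the fragments they return, recursively.\n    if isinstance(program, tuple):\n        head, *args = program\n        if head in (\"refer\", \"revise\"):\n            return inline(resolve(program), resolve)\n        return (head, *[inline(a, resolve) for a in args])\n    return program\n\ndef program_accuracy(predictions, golds, resolve):\n    matches = sum(inline(p, resolve) == inline(g, resolve) for p, g in zip(predictions, golds))\n    return matches / len(golds)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "7"

},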
|
{ |
|
"text": "We further evaluate our approach on dialogue state tracking using MultiWOZ 2.1. Table 3 shows results. For the full representation, the selected model uses c = 2, d = 384, l = 2, and r = 0.7. For the inline refer variant, they are 4, 320, 3, and 0.3. For the variant that inlines both refer and revise calls, they are 10, 320, 2, and 0.7. Even without metacomputation, prediction of programbased representations gives results comparable to the existing state of the art, TRADE, on the standard \"Joint Goal\" metric (turn-level exact match).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 87, |
|
"text": "Table 3", |
|
"ref_id": "TABREF14" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantitative evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "baselines that do not have access to pre-defined constraint transformation logic. (Wu et al., 2019) results are from the public implementation. \"Joint Goal\" (Budzianowski et al., 2018) is average dialogue state exact-match, \"Dialogue\" is average dialogue-level exact-match, and \"Prefix\" is the average number of turns before an incorrect prediction. Within each column, the best result is boldfaced, along with all results that are not significantly worse (p < 0.05, paired permutation test). Moreover, all of \"Dataflow,\" \"inline refer,\" and \"inline both\" have higher dialogue accuracy than TRADE (p < 0.005).", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 99, |
|
"text": "(Wu et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 184, |
|
"text": "(Budzianowski et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative evaluation", |
|
"sec_num": null |
|
}, |
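
{

"text": "The three metrics can be sketched as follows, assuming each dialogue has been reduced to a list of per-turn booleans recording whether the predicted dialogue state exactly matches the gold state at that turn.\n\ndef state_tracking_metrics(dialogues):\n    # dialogues: list of dialogues; each dialogue is a list of per-turn booleans\n    turns = [ok for d in dialogues for ok in d]\n    joint_goal = sum(turns) / len(turns)                    # turn-level exact match\n    dialogue_acc = sum(all(d) for d in dialogues) / len(dialogues)\n    prefix = sum(next((i for i, ok in enumerate(d) if not ok), len(d))\n                 for d in dialogues) / len(dialogues)       # turns before the first error\n    return joint_goal, dialogue_acc, prefix",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Quantitative evaluation",

"sec_num": null

},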
|
{ |
|
"text": "(Our dataflow representation for MultiWOZ is designed so that dataflow graph evaluation produces native MultiWOZ slot-value structures.) However, Joint Goal does not fully characterize the effectiveness of a state tracking system in real-world interactions, as it allows the model to recover from an error at an earlier turn by conditioning on gold agent utterances after the error. We thus evaluate on dialogue-level exact match and prefix length (the average number of turns until an error). On these metrics the benefit of dataflow over past approaches is clearer. Differences within dataflow model variants are smaller here than in Table 2 . For the Joint Goal metric, the no-metacomputation baseline is better; we attribute this to the comparative simplicity of reference in the MultiWOZ dataset. In any case, casting the state-tracking problem as one of program prediction with appropriate primitives gives a state-of-the-art statetracking model for MultiWOZ using only off-theshelf sequence prediction tools. Error analysis Beyond the quantitative results shown in Tables 2-3, we manually analyzed 100 SMCalFlow turns where our model mispredicted. Table 4 breaks down the errors by type. Three categories involve straightforward parsing errors. In underprediction errors, the model fails to predict some computation (e.g., a search constraint or property extractor) specified in the user request. This behavior is not specific to our system: under-length predictions are also welldocumented in neural machine translation systems (Murray and Chiang, 2018) . In entity linking errors, the model correctly identifies the presence of an entity mention in the input utterance, but uses it incorrectly in the input plan. Sometimes the entity that appears in the plan is hallucinated, appearing nowhere in the utterance; sometimes the entity is cast to a wrong type (e.g., locations interpreted as event names) used in the wrong field or extracted with wrong boundaries. In fencing errors, the model interprets an out-of-scope user utterance as an interpretable command, or vice-versa versions of the full dataset, and inlined and non-inlined versions of our model's test set predictions, enabling side-byside comparisons and experiments with alternative representations. We provide full conversion scripts for MultiWOZ. (compare to Figure 2, turn 4) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1536, |
|
"end": 1561, |
|
"text": "(Murray and Chiang, 2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 636, |
|
"end": 643, |
|
"text": "Table 2", |
|
"ref_id": "TABREF12" |
|
}, |
|
{ |
|
"start": 1155, |
|
"end": 1162, |
|
"text": "Table 4", |
|
"ref_id": "TABREF16" |
|
}, |
|
{ |
|
"start": 2333, |
|
"end": 2350, |
|
"text": "Figure 2, turn 4)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantitative evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The fourth category, ambiguity errors, is more interesting. In these cases, the predicted plan corresponds to an interpretation of the user utterance that would be acceptable in some discourse context. In a third of these cases, this interpretation is ruled out by either dialogue context (e.g., interpreting what's next? as a request for the next list item rather than the event with the next earliest start time) or commonsense knowledge (make it at 8 means 8 a.m. for a business meeting and 8 p.m. for a dance party). In the remaining cases, the predicted plan expresses an alternative computation that produces the same result, or an alternative interpretation that is also contextually appropriate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The view of dialogue as an interactive process of shared plan synthesis dates back to Grosz and Sidner's earliest work on discourse structure (1986; 1988) . That work represents the state of a dialogue as a predicate recognizing whether a desired piece of information has been communicated or change in world state effected. Goals can be refined via questions and corrections from both users and agents. The only systems to attempt full versions of this shared-plans framework (e.g., Allen et al., 1996; Rich et al., 2001 ) required inputs that could be parsed under a predefined grammar. Subsequent research on dialogue understanding has largely focused on two simpler subtasks:", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 154, |
|
"text": "(1986; 1988)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 503, |
|
"text": "Allen et al., 1996;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 521, |
|
"text": "Rich et al., 2001", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Contextual semantic parsing approaches focus on complex language understanding without reasoning about underspecified goals or agent initiative. Here the prototypical problem is iterated question answering (Hemphill et al., 1990; Yu et al., 2019b) , in which the user asks a sequence of questions corresponding to database queries, and results of query execution are presented as structured result sets. Vlachos and Clark (2014) describe a semantic parsing representation targeted at more general dialogue problems. Most existing methods interpret context-dependent user questions (What is the next flight to Atlanta? When does it land?) by learning to copy subtrees (Zettlemoyer and Collins, 2009; Iyyer et al., 2017; Suhr et al., 2018) or tokens (Zhang et al., 2019) from previously-generated queries. In contrast, our approach reifies reuse with explicit graph operators.", |
|
"cite_spans": [ |
|
{ |
|
"start": 206, |
|
"end": 229, |
|
"text": "(Hemphill et al., 1990;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 247, |
|
"text": "Yu et al., 2019b)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 698, |
|
"text": "(Zettlemoyer and Collins, 2009;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 718, |
|
"text": "Iyyer et al., 2017;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 719, |
|
"end": 737, |
|
"text": "Suhr et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 768, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Slot-filling approaches (Pieraccini et al., 1992 ) model simpler utterances in the context of full, in-teractive dialogues. It is assumed that any user intent can be represented with a flat structure consisting of a categorical dialogue act and a mapping between a fixed set of slots and string-valued fillers. Existing fine-grained dialogue act schemes (Stolcke et al., 2000) can distinguish among a range of communicative intents not modeled by our approach, and slot-filling representations have historically been easier to predict (Zue et al., 1994) and annotate (Byrne et al., 2019) . But while recent variants support interaction between related slots (Budzianowski et al., 2018) and fixed-depth hierarchies of slots (Gupta et al., 2018) , modern slot-filling approaches remain limited in their support for semantic compositionality. By contrast, our approach supports user requests corresponding to general compositional programs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 48, |
|
"text": "(Pieraccini et al., 1992", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 376, |
|
"text": "(Stolcke et al., 2000)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 553, |
|
"text": "(Zue et al., 1994)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 587, |
|
"text": "(Byrne et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 685, |
|
"text": "(Budzianowski et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 723, |
|
"end": 743, |
|
"text": "(Gupta et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "More recent end-to-end dialogue agents attempt to map directly from conversation histories to API calls and agent utterances using neural sequence-to-sequence models without a representation of dialogue state (Bordes et al., 2016; Yu et al., 2019a) . While promising, models in these papers fail to outperform rule-or template-driven baselines. report greater success on a generation-focused task, and promising results have also been obtained from hybrid neuro-symbolic dialogue systems (Zhao and Eskenazi, 2016; Williams et al., 2017; . Much of this work is focused on improving agent modeling for existing representation schemes like slot filling. We expect that many modeling innovations (e.g., the neural entity linking mechanism proposed by Williams et al.) could be used in conjunction with the new representational framework we have proposed in this paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 230, |
|
"text": "(Bordes et al., 2016;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 248, |
|
"text": "Yu et al., 2019a)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 513, |
|
"text": "(Zhao and Eskenazi, 2016;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 536, |
|
"text": "Williams et al., 2017;", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Like slot-filling approaches, our framework is aimed at modeling full dialogues in which agents can ask questions, recover from errors, and take actions with side effects, all backed by an explicit state representation. However, our notions of \"state\" and \"action\" are much richer than in slot-filling systems, extending to arbitrary compositions of primitive operators. We use semantic parsing as a modeling framework for dialogue agents that can construct compositional states of this kind. While dataflow-based representations are widely used to model execution state for programming languages (Kam and Ullman, 1976) , this is the first work we are aware of that uses them to model conversational context and dialogue.", |
|
"cite_spans": [ |
|
{ |
|
"start": 597, |
|
"end": 619, |
|
"text": "(Kam and Ullman, 1976)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We have presented a representational framework for task-oriented dialogue modeling based on dataflow graphs, in which dialogue agents predict a sequence of compositional updates to a graphical state representation. This approach makes it possible to represent and learn from complex, natural dialogues. Future work might focus on improving prediction by introducing learned implementations of refer and revise that, along with the program predictor itself, could evaluate their hypotheses for syntactic, semantic, and pragmatic plausibility. The representational framework could itself be extended, e.g., by supporting declarative user goals and preferences that persist across utterances. We hope that the rich representations presented here-as well as our new dataset-will facilitate greater use of context and compositionality in learned models for task-oriented dialogue.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Note that what the agent predicts is not a formal representation of the utterance's meaning, but a query that enables a contextually appropriate response (what Austin (1962) called the \"perlocutionary force\" of the utterance on", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Fortunately, this constraint need not be manually annotated. Given the rest of the program, it can be inferred automatically by Hindley-Milner type inference(Hindley, 1969;Milner, 1978), which establishes that this refer node must return a DateTime if the program is to type-check.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "refer and revise are not the only operations that take constraints as arguments. For example, constraint arguments to findEvent and createEvent specify what sort of event is to be retrieved or created. In these other contexts, to avoid distracting the reader, this paper uses EventSpec as an alias for Constraint[Event]. It similarly uses aliases PersonSpec and DateTimeSpec. (Our dataset does not.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Specifically, we inline all refer calls and revise calls that involve direct substitution of the kind described in \u00a74. We preserve reviseConstraint calls to avoid penalizing", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A note on reproducibility: Dependence on internal libraries prevents us from releasing a full salience model implementation and inlining script for SMCalFlow. The accompanying data release includes both inlined and non-inlined", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Tatsunori Hashimoto, Jianfeng Gao, and the anonymous TACL reviewers for feedback on early drafts of this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A robust system for natural spoken dialogue", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Allen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bradford", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Ringger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teresa", |
|
"middle": [], |
|
"last": "Sikorski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "62--70", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James F. Allen, Bradford W. Miller, Eric K. Ring- ger, and Teresa Sikorski. 1996. A robust system for natural spoken dialogue. In Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics, pages 62-70. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "How to Do Things with Words. William James Lectures", |
|
"authors": [ |
|
{ |
|
"first": "Austin", |
|
"middle": [], |
|
"last": "John Langshaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1962, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Langshaw Austin. 1962. How to Do Things with Words. William James Lectures. Oxford University Press. Edited by James O. Urmson. A second edition appeared in 1975.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning end-to-end goal-oriented dialog", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y-Lan", |
|
"middle": [], |
|
"last": "Boureau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Bordes, Y-Lan Boureau, and Jason We- ston. 2016. Learning end-to-end goal-oriented dialog. In Proceedings of the International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "MultiWOZ -A large-scale multi-domain Wizard-of-Oz dataset for task-oriented dialogue modelling", |
|
"authors": [ |
|
{ |
|
"first": "Pawel", |
|
"middle": [], |
|
"last": "Budzianowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Hsien", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo-Hsiang", |
|
"middle": [], |
|
"last": "Tseng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I\u00f1igo", |
|
"middle": [], |
|
"last": "Casanueva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Ultes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Osman Ramadan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gasic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pawel Budzianowski, Tsung-Hsien Wen, Bo- Hsiang Tseng, I\u00f1igo Casanueva, Stefan Ultes, Osman Ramadan, and Milica Gasic. 2018. MultiWOZ -A large-scale multi-domain Wizard-of-Oz dataset for task-oriented dialogue modelling. In Proceedings of the Conference on Empirical Methods in Natural Language Pro- cessing.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Taskmaster-1: Toward a realistic and diverse dialog dataset", |
|
"authors": [ |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Byrne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Krishnamoorthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chinnadhurai", |
|
"middle": [], |
|
"last": "Sankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Goodrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Duckworth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Semih", |
|
"middle": [], |
|
"last": "Yavuz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Dubey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyu-Young", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Cedilnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4516--4525", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bill Byrne, Karthik Krishnamoorthi, Chinnadhu- rai Sankar, Arvind Neelakantan, Ben Goodrich, Daniel Duckworth, Semih Yavuz, Amit Dubey, Kyu-Young Kim, and Andy Cedilnik. 2019. Taskmaster-1: Toward a realistic and diverse dialog dataset. In Proceedings of the Confer- ence on Empirical Methods in Natural Lan- guage Processing and the International Joint Conference on Natural Language Processing, pages 4516-4525, Hong Kong, China.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "MultiWOZ 2.1: A consolidated multi-domain dialogue dataset with state corrections and state tracking baselines", |
|
"authors": [ |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shachi", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Sethi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanchit", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuyag", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.01669" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihail Eric, Rahul Goel, Shachi Paul, Abhishek Sethi, Sanchit Agarwal, Shuyag Gao, and Dilek Hakkani-Tur. 2019. MultiWOZ 2.1: A con- solidated multi-domain dialogue dataset with state corrections and state tracking baselines. arXiv:1907.01669 [cs.CL].", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Neural approaches to conversational AI. Foundations and Trends\u00ae in Information Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lihong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "127--298", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianfeng Gao, Michel Galley, Lihong Li, et al. 2019. Neural approaches to conversational AI. Foundations and Trends\u00ae in Information Re- trieval, 13(2-3):127-298.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "HyST: A hybrid approach for flexible and accurate dialogue state tracking", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shachi", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-T\u00fcr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference of the International Speech Communication Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rahul Goel, Shachi Paul, and Dilek Hakkani-T\u00fcr. 2019. HyST: A hybrid approach for flexible and accurate dialogue state tracking. In Pro- ceedings of the Conference of the International Speech Communication Association.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Attention, intentions, and the structure of discourse", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Barbara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Candace", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Grosz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sidner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "Computational Linguistics", |
|
"volume": "12", |
|
"issue": "3", |
|
"pages": "175--204", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbara J. Grosz and Candace L. Sidner. 1986. Attention, intentions, and the structure of dis- course. Computational Linguistics, 12(3):175- 204.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Plans for discourse", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Barbara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Candace", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Grosz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sidner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1988, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbara J. Grosz and Candace L. Sidner. 1988. Plans for discourse. Technical report, BBN Laboratories.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Semantic parsing for task oriented dialog using hierarchical representations", |
|
"authors": [ |
|
{ |
|
"first": "Sonal", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rushin", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mrinal", |
|
"middle": [], |
|
"last": "Mohit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anuj", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sonal Gupta, Rushin Shah, Mrinal Mohit, Anuj Kumar, and Mike Lewis. 2018. Semantic pars- ing for task oriented dialog using hierarchical representations. In Proceedings of the Con- ference on Empirical Methods in Natural Lan- guage Processing.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The ATIS spoken language systems pilot corpus", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Hemphill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Godfrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Doddington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Speech and Natural Language: Proceedings of a Workshop Held at Hidden Valley", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles T. Hemphill, John J. Godfrey, and George R. Doddington. 1990. The ATIS spo- ken language systems pilot corpus. In Speech and Natural Language: Proceedings of a Work- shop Held at Hidden Valley.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The principal type-scheme of an object in Combinatory Logic", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Hindley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1969, |
|
"venue": "Transactions of the American Mathematical Society", |
|
"volume": "146", |
|
"issue": "", |
|
"pages": "29--60", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2307/1995158" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Hindley. 1969. The principal type-scheme of an object in Combinatory Logic. Transactions of the American Mathematical Society, 146:29- 60.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Search-based neural structured learning for sequential question answering", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Iyyer, Wen-tau Yih, and Ming-Wei Chang. 2017. Search-based neural structured learning for sequential question answering. In Proceed- ings of the Annual Meeting of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Global data flow analysis and iterative algorithms", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Kam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Ullman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1976, |
|
"venue": "Journal of the ACM (JACM)", |
|
"volume": "23", |
|
"issue": "1", |
|
"pages": "158--171", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://dl.acm.org/doi/10.1145/321921.321938" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John B. Kam and Jeffrey D. Ullman. 1976. Global data flow analysis and iterative algorithms. Journal of the ACM (JACM), 23(1):158-171.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Proceed- ings of the International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Opennmt: Open-source toolkit for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuntian", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-4012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senellart, and Alexander M. Rush. 2017. Open- nmt: Open-source toolkit for neural machine translation. In Proceedings of the Annual Meet- ing of the Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Term Rewriting Systems. Centrum voor Wiskunde en Informatica", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [ |
|
"Willem" |
|
], |
|
"last": "Klop", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Willem Klop. 1990. Term Rewriting Systems. Centrum voor Wiskunde en Informatica.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "An algorithm for pronominal anaphora resolution", |
|
"authors": [ |
|
{ |
|
"first": "Shalom", |
|
"middle": [], |
|
"last": "Lappin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herbert", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Leass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Computational Linguistics", |
|
"volume": "20", |
|
"issue": "4", |
|
"pages": "535--561", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shalom Lappin and Herbert J. Leass. 1994. An algorithm for pronominal anaphora resolution. Computational Linguistics, 20(4):535-561.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A theory of type polymorphism in programming", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Milner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1978, |
|
"venue": "Journal of Computer and System Sciences", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "348--375", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Milner. 1978. A theory of type polymor- phism in programming. Journal of Computer and System Sciences, 17:348-375.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Anaphora Resolution", |
|
"authors": [ |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Mitkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruslan Mitkov. 2014. Anaphora Resolution.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Correcting length bias in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Murray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Murray and David Chiang. 2018. Correct- ing length bias in neural machine translation. In Proceedings of the Conference on Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Neural Assistant: Joint action prediction, response generation, and latent knowledge reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Semih", |
|
"middle": [], |
|
"last": "Yavuz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishaal", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Goodrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Duckworth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chinnadhurai", |
|
"middle": [], |
|
"last": "Sankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xifeng", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.14613" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arvind Neelakantan, Semih Yavuz, Sharan Narang, Vishaal Prasad, Ben Goodrich, Daniel Duckworth, Chinnadhurai Sankar, and Xifeng Yan. 2019. Neural Assistant: Joint action pre- diction, response generation, and latent knowl- edge reasoning. arXiv:1910.14613 [cs.LG].", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. GloVe: Global vec- tors for word representation. In Proceedings of the Conference on Empirical Methods in Natu- ral Language Processing, pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A speech understanding system based on statistical representation of semantics", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Pieraccini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evelyne", |
|
"middle": [], |
|
"last": "Tzoukermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zakhar", |
|
"middle": [], |
|
"last": "Gorelov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-L", |
|
"middle": [], |
|
"last": "Gauvain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Esther", |
|
"middle": [], |
|
"last": "Levin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jay", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Wilpon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proceedings of 1992 IEEE International Conference on Acoustics, Speech, and Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "193--196", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Pieraccini, Evelyne Tzoukermann, Za- khar Gorelov, J.-L. Gauvain, Esther Levin, C.- H. Lee, and Jay G. Wilpon. 1992. A speech understanding system based on statistical repre- sentation of semantics. In Proceedings of 1992 IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP), vol- ume 1, pages 193-196. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Collagen: Applying collaborative discourse theory to human-computer interaction", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Rich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Candace", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Sidner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neal", |
|
"middle": [], |
|
"last": "Lesh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "AI Magazine", |
|
"volume": "22", |
|
"issue": "4", |
|
"pages": "15--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles Rich, Candace L. Sidner, and Neal Lesh. 2001. Collagen: Applying collaborative dis- course theory to human-computer interaction. AI Magazine, 22(4):15-15.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Get to the point: Summarization with pointer-generator networks", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1073--1083", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summa- rization with pointer-generator networks. In Proceedings of the Annual Meeting of the As- sociation for Computational Linguistics, pages 1073-1083.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Dialogue act modeling for automatic tagging and recognition of conversational speech", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Ries", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Coccaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Bates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carol", |
|
"middle": [], |
|
"last": "Van Ess-Dykema", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie", |
|
"middle": [], |
|
"last": "Meteer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Computational Linguistics", |
|
"volume": "26", |
|
"issue": "3", |
|
"pages": "339--373", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Stolcke, Klaus Ries, Noah Coccaro, Eliz- abeth Shriberg, Rebecca Bates, Daniel Juraf- sky, Paul Taylor, Rachel Martin, Carol Van Ess- Dykema, and Marie Meteer. 2000. Dialogue act modeling for automatic tagging and recognition of conversational speech. Computational Lin- guistics, 26(3):339-373.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Learning to map context-dependent sentences to executable formal queries", |
|
"authors": [ |
|
{ |
|
"first": "Alane", |
|
"middle": [], |
|
"last": "Suhr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinivasan", |
|
"middle": [], |
|
"last": "Iyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Human Language Technology Conference of the North American Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alane Suhr, Srinivasan Iyer, and Yoav Artzi. 2018. Learning to map context-dependent sentences to executable formal queries. In Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "A survey of rewriting strategies in program transformation systems", |
|
"authors": [ |
|
{ |
|
"first": "Eelco", |
|
"middle": [], |
|
"last": "Visser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Electronic Notes in Theoretical Computer Science", |
|
"volume": "57", |
|
"issue": "", |
|
"pages": "109--143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eelco Visser. 2001. A survey of rewriting strate- gies in program transformation systems. Elec- tronic Notes in Theoretical Computer Science, 57:109-143.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "A new corpus and imitation learning framework for context-dependent semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vlachos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "547--560", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Vlachos and Stephen Clark. 2014. A new corpus and imitation learning framework for context-dependent semantic parsing. Trans- actions of the Association for Computational Linguistics, 2:547-560.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "A network-based end-to-end trainable task-oriented dialogue system", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Tsung-Hsien Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Vandyke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Mrk\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lina", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Gasic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pei-Hao", |
|
"middle": [], |
|
"last": "Rojas Barahona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Ultes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the European Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "438--449", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Hsien Wen, David Vandyke, Nikola Mrk\u0161i\u0107, Milica Gasic, Lina M. Rojas Barahona, Pei-Hao Su, Stefan Ultes, and Steve Young. 2017. A network-based end-to-end trainable task-oriented dialogue system. In Proceedings of the European Association for Computational Linguistics, pages 438-449.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Hybrid code networks: Practical and efficient end-to-end dialog control with supervised and reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kavosh", |
|
"middle": [], |
|
"last": "Asadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason D. Williams, Kavosh Asadi, and Geoffrey Zweig. 2017. Hybrid code networks: Practi- cal and efficient end-to-end dialog control with supervised and reinforcement learning. In Pro- ceedings of the Annual Meeting of the Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Transferable multidomain state generator for task-oriented dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Chien-Sheng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Madotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehsan", |
|
"middle": [], |
|
"last": "Hosseini-Asl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chien-Sheng Wu, Andrea Madotto, Ehsan Hosseini-Asl, Caiming Xiong, Richard Socher, and Pascale Fung. 2019. Transferable multi- domain state generator for task-oriented dialogue systems. In Proceedings of the Annual Meeting of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "POMDP-based statistical spoken dialog systems: A review", |
|
"authors": [ |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Gasic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blaise", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. IEEE", |
|
"volume": "101", |
|
"issue": "", |
|
"pages": "1160--1179", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steve Young, Milica Gasic, Blaise Thomson, and Jason D. Williams. 2013. POMDP-based sta- tistical spoken dialog systems: A review. Proc. IEEE, 101(5):1160-1179.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "CoSQL: A conversational textto-SQL challenge towards cross-domain natural language interfaces to databases", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyang", |
|
"middle": [], |
|
"last": "Er", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suyi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xi Victoria", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi Chern", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianze", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youxuan", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michihiro", |
|
"middle": [], |
|
"last": "Yasunaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungrok", |
|
"middle": [], |
|
"last": "Shim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Fabbri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zifan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luyao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuwen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shreya", |
|
"middle": [], |
|
"last": "Dixit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Lasecki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1962--1979", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Yu, Rui Zhang, Heyang Er, Suyi Li, Eric Xue, Bo Pang, Xi Victoria Lin, Yi Chern Tan, Tianze Shi, Zihan Li, Youxuan Jiang, Michihiro Ya- sunaga, Sungrok Shim, Tao Chen, Alexander Fabbri, Zifan Li, Luyao Chen, Yuwen Zhang, Shreya Dixit, Vincent Zhang, Caiming Xiong, Richard Socher, Walter Lasecki, and Dragomir Radev. 2019a. CoSQL: A conversational text- to-SQL challenge towards cross-domain natu- ral language interfaces to databases. In Pro- ceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Nat- ural Language Processing (EMNLP-IJCNLP), pages 1962-1979, Hong Kong, China.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "SParC: Cross-domain semantic parsing in context", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michihiro", |
|
"middle": [], |
|
"last": "Yasunaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chern Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xi", |
|
"middle": [], |
|
"last": "Victoria Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suyi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyang", |
|
"middle": [], |
|
"last": "Er", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irene", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Yu, Rui Zhang, Michihiro Yasunaga, Yi Chern Tan, Xi Victoria Lin, Suyi Li, Heyang Er, Irene Li, Bo Pang, Tao Chen, et al. 2019b. SParC: Cross-domain semantic parsing in context. In Proceedings of the Annual Meeting of the Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Using Inductive Logic Programming to Automate the Construction of Natural Language Parsers", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Zelle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Zelle. 1995. Using Inductive Logic Pro- gramming to Automate the Construction of Nat- ural Language Parsers. Ph.D. thesis, Depart- ment of Computer Sciences, The University of Texas at Austin.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Learning context-dependent mappings from sentences to logical form", |
|
"authors": [ |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "976--984", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luke Zettlemoyer and Michael Collins. 2009. Learning context-dependent mappings from sentences to logical form. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Con- ference on Natural Language Processing of the AFNLP, pages 976-984. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Editing-based SQL query generation for cross-domain context-dependent questions", |
|
"authors": [ |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He Yang", |
|
"middle": [], |
|
"last": "Er", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungrok", |
|
"middle": [], |
|
"last": "Shim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xi Victoria", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianze", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rui Zhang, Tao Yu, He Yang Er, Sungrok Shim, Eric Xue, Xi Victoria Lin, Tianze Shi, Caiming Xiong, Richard Socher, and Dragomir Radev. 2019. Editing-based SQL query generation for cross-domain context-dependent questions. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Towards end-to-end learning for dialog state tracking and management using deep reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Tiancheng", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxine", |
|
"middle": [], |
|
"last": "Eskenazi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W16-3601" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tiancheng Zhao and Maxine Eskenazi. 2016. To- wards end-to-end learning for dialog state track- ing and management using deep reinforcement learning. In Proceedings of the 17th Annual Meeting of the Special Interest Group on Dis- course and Dialogue, pages 1-10, Los Angeles. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "PEGASUS: A spoken dialogue interface for online air travel planning", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Zue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Seneff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Polifroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Phillips", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Pao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Goodine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Goddeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Speech Communication", |
|
"volume": "15", |
|
"issue": "3-4", |
|
"pages": "331--340", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Zue, Stephanie Seneff, Joseph Polifroni, Michael Phillips, Christine Pao, David Good- ine, David Goddeau, and James Glass. 1994. PEGASUS: A spoken dialogue interface for on- line air travel planning. Speech Communica- tion, 15(3-4):331-340.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF2": { |
|
"html": null, |
|
"content": "<table><tr><td>refer(Constraint[Event](date=</td></tr><tr><td>Constraint[DateTime](weekday=thurs)))</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Constraint[DateTime]). 3 Constraints are predicates built from boolean connectives and the constructions illustrated below: 4 \u2022 Type constraints: the meeting maps to the call refer(Constraint[Event]()), where the constraint matches all nodes with values of type Event. \u2022 Property constraints: Assuming a structured Event type with a date property, and a Date type with a weekday property, the Thursday meeting maps to this nested call:", |
|
"num": null |
|
}, |
|
"TABREF10": { |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF11": { |
|
"html": null, |
|
"content": "<table><tr><td/><td>Full</td><td/><td colspan=\"2\">Ref. Turns</td><td colspan=\"2\">Rev. Turns</td></tr><tr><td/><td>dev</td><td>test</td><td>dev</td><td>test</td><td>dev</td><td>test</td></tr><tr><td colspan=\"7\"># of Turns 13,499 21,224 3,554 8,965 1,052 3,315</td></tr><tr><td>Dataflow</td><td>.729</td><td>.665</td><td>.642</td><td>.574</td><td>.697</td><td>.565</td></tr><tr><td>inline</td><td>.696</td><td>.606</td><td>.533</td><td>.465</td><td>.631</td><td>.474</td></tr></table>", |
|
"type_str": "table", |
|
"text": "because of differences in tokenization (see code release).", |
|
"num": null |
|
}, |
|
"TABREF12": { |
|
"html": null, |
|
"content": "<table><tr><td>Agent action accuracy is</td></tr><tr><td>significantly higher than a baseline without metacom-</td></tr><tr><td>putation, especially on turns that involve a reference</td></tr><tr><td>(Ref. Turns) or revision (Rev. Turns) to earlier turns in</td></tr><tr><td>the dialogue (p < 10 \u22126 , McNemar's test).</td></tr></table>", |
|
"type_str": "table", |
|
"text": "SMCalFlow results.", |
|
"num": null |
|
}, |
|
"TABREF14": { |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "MultiWOZ 2.1 test set results. TRADE", |
|
"num": null |
|
}, |
|
"TABREF16": { |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Manual classification of 100 model errors on the SMCalFlow dataset. The largest categories are underprediction (omitting steps from agent programs), entity linking (errors in extraction of entities from user utterances, fencing (classifying a user request as outof-scope), and ambiguity (user utterances with multiple possible interpretations). See \u00a77 for discussion.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |