|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:10:33.541111Z" |
|
}, |
|
"title": "Multi-modal Intent Classification for Assistive Robots with Large-scale Naturalistic Datasets", |
|
"authors": [ |
|
{ |
|
"first": "Karun", |
|
"middle": [], |
|
"last": "Mathew", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Tarigoppula", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Lea", |
|
"middle": [], |
|
"last": "Frermann", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recent years have brought a tremendous growth in assistive robots/prosthetics for people with partial or complete loss of upper limb control. These technologies aim to help the users with various reaching and grasping tasks in their daily lives such as picking up an object and transporting it to a desired location; and their utility critically depends on the ease and effectiveness of communication between the user and robot. One of the natural ways of communicating with assistive technologies is through verbal instructions. The meaning of natural language commands depends on the current configuration of the surrounding environment and needs to be interpreted in this multi-modal context, as accurate interpretation of the command is essential for a successful execution of the user's intent by an assistive device. The research presented in this paper demonstrates how large-scale situated natural language datasets can support the development of robust assistive technologies. We leveraged a navigational dataset comprising > 25k human-provided natural language commands covering diverse situations. We demonstrated a way to extend the dataset in a task-informed way and use it to develop multi-modal intent classifiers for pick and place tasks. Our best classifier reached > 98% accuracy in a 16-way multi-modal intent classification task, suggesting high robustness and flexibility.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recent years have brought a tremendous growth in assistive robots/prosthetics for people with partial or complete loss of upper limb control. These technologies aim to help the users with various reaching and grasping tasks in their daily lives such as picking up an object and transporting it to a desired location; and their utility critically depends on the ease and effectiveness of communication between the user and robot. One of the natural ways of communicating with assistive technologies is through verbal instructions. The meaning of natural language commands depends on the current configuration of the surrounding environment and needs to be interpreted in this multi-modal context, as accurate interpretation of the command is essential for a successful execution of the user's intent by an assistive device. The research presented in this paper demonstrates how large-scale situated natural language datasets can support the development of robust assistive technologies. We leveraged a navigational dataset comprising > 25k human-provided natural language commands covering diverse situations. We demonstrated a way to extend the dataset in a task-informed way and use it to develop multi-modal intent classifiers for pick and place tasks. Our best classifier reached > 98% accuracy in a 16-way multi-modal intent classification task, suggesting high robustness and flexibility.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Paralysis is a loss of motor function to varying degrees of severity often resulting in severely reduced or complete loss of upper and/or lower limb control. Such impairments reduce the quality of life for millions of people affected by paralysis (Armour et al., 2016) and increase their dependence upon others to perform day-to-day activities including self-or 0 Work done while at Melbourne University. The system receives visual information extracted from the environment together with a natural language task command as input; and uses this to predicts the intent as a suitable sequence of actions necessary to execute the command. Visual scene parsing and cross-modal entity linking are not tackled in this work. object locomotion and object manipulation tasks like reaching, picking up an object and moving it to a desired location (pick and place). Assistive devices can compensate for some of the impairments provided that they can accurately infer and execute user intents. Most assistive devices currently in use rely on manual control (e.g., wheelchairs controlled with joysticks), and cannot understand natural language user commands or map them to potentially complex sequences of actions. Moreover, they do not perceptively account for the surrounding environment they are interacting with and as a consequence require a more detailed user input. Therefore, recent developments have focused on Intelligent Assistive Devices (IAD), that combine traditional assistive devices with advanced sensors and artificial intelligence, aiming for an accurate inference of a user's intent in the context of a multimodal representation of the environment (Barry et al., 1994) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 268, |
|
"text": "(Armour et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1656, |
|
"end": 1676, |
|
"text": "(Barry et al., 1994)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The utility of the IAD depends critically on the efficiency and effectiveness of the communication with the user. One of the natural ways of instructing the IAD is through verbal communication. It is important to recognize that a majority of patients suffering a loss of limb control retain the ability to speak, albeit impaired in some cases. Modern voice controlled IADs such as wheelchairs (Hou et al., 2020; Umchid et al., 2018) , smart home appliances and assistive anthropomorphic robots (Pulikottil et al., 2018a; John et al., 2020) are still limited to a pre-defined set of instructions that the user can choose from. This requires the user to explicitly dictate each individual action leading to the final goal rather than just stating the desired goal alone and off-loading the decision making to perform any required sequence of actions to accomplish the user's intent. Consider the example in Figure 1 , where a robotic assistant situated in a complex and dynamic environment is given a verbal instruction \"Pick up the book\". While the need of a \"pick\" action is evident from the language command alone, possible additional actions (navigate to the book's location, or to turn around to face the book) depend on the agent and book's location, thus requiring an interpretation of the natural language command in the context of the surrounding environment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 393, |
|
"end": 411, |
|
"text": "(Hou et al., 2020;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 432, |
|
"text": "Umchid et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 520, |
|
"text": "(Pulikottil et al., 2018a;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 539, |
|
"text": "John et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 905, |
|
"end": 913, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present a step towards bridging this gap by drawing on large, data resources, state of the art language understanding and intent classification methods. We develop a classifier that takes a higher-order task command contextualized in the current environment as input and derives the necessary set of sub-actions (intents) required to achieve the goal intended by the user. We present a scalable framework to develop such flexible natural language interfaces for IAD that execute 'pick and place' tasks. Specifically, we leverage AL-FRED (Shridhar et al., 2020) , a large-scale naturalistic data set for developing indoor navigation systems, comprising diverse, crowd-sourced natural language commands and photo-realistic images, and adapt it to the pick-and-place task (Section 3). We augment the state-of-the-art natural language intent classifier DIET (Bunk et al., 2020) with a visual processing component (Section 4). Evaluation against simpler classifiers as well as exclusively text-based classification scenarios shows the advantage of joint processing of visual and language information, as well as the DIET architecture. The use of large-scale naturalistic data allows to build solutions that generalize beyond the confines of a laboratory, are easily adaptable and have the po-tential to improve the overall quality of life for the user. This framework is part of a larger project intended to develop a multi-modal (voice and brain signal) prosthetic limb control.", |
|
"cite_spans": [ |
|
{ |
|
"start": 555, |
|
"end": 578, |
|
"text": "(Shridhar et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 872, |
|
"end": 891, |
|
"text": "(Bunk et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In short, our contributions are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We show that task-related large-scale data sets can effectively support the development assistive technology. We augmented the AL-FRED data set with anticipated scenarios of human-assistive agent interaction, including noisy and partially observed scenarios.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We contribute a multi-modal extension of a state-of-the-art natural language intent classifier (DIET) with a visual component, which lead to the overall best classification results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our best performing model achieved 98% accuracy in a 16-way classification task over diverse user-generated commands, evidencing that our architecture supports flexible and reliable intent classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our work is cross-disciplinary, covering both medical robotics and (multi-modal) machine learning and NLP for intent classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Intent classification is the task of mapping a natural language input to a set of actions that when executed help achieve the underlying goals of the user.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As an essential component of conversational systems, it has attracted much attention in the natural language understanding community with methods ranging from semantic parsing (Chen and Mooney, 2011) to more recent deep learning (Liu and Lane, 2016; Goo et al., 2018) and transfer learning approaches, with unsupervised pre-training (Liu et al., 2019; Henderson et al., 2020) . The on-line interactive nature of dialogue applications makes model efficiency a central objective. We build on the recent DIET classifier (Dual Intent and Entity Transformer; (Bunk et al., 2020) ) which achieves competitive performance in intent classification, while maintaining a lightweight architecture without the need for a large pre-trained language models. DIET was originally developed for language-based dialogue, and we extend the system with a vision understanding component and show that it generalizes to a multi-modal task setup.", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 199, |
|
"text": "(Chen and Mooney, 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 249, |
|
"text": "(Liu and Lane, 2016;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 267, |
|
"text": "Goo et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 351, |
|
"text": "(Liu et al., 2019;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 375, |
|
"text": "Henderson et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 573, |
|
"text": "(Bunk et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Visually grounded language understanding addresses the analysis of verbal commands in the context of the visual environment. Prior work ranges from schematic representations of the environment avoiding the need for image analysis (Chen and Mooney, 2011) over simplistic visual environments (\"block worlds\" Bisk et al. 2016)to complex outdoor navigation (Chen et al., 2019) ). The advance of deep learning methods for joint visual and textual processing has lead to the development of large-scale datasets which feature both naturalisitic language as well as images (Bunk et al., 2020; Chen et al., 2019; Puig et al., 2018) . We leverage a subset of the ALFRED dataset (Bunk et al., 2020) which is a benchmark dataset for learning a mapping from natural language instructions and egocentric (first person) vision to sequences of actions for performing household tasks. The commands in the ALFRED dataset are crowd-sourced from humans, and as such are diverse and resemble naturalistic language. The visual scenes are complex and photo-realistic, and the dataset contains tasks requiring the agent to execute complex sequences of multiple, context-dependent actions to manipulate objects in an environment that closely resembles the medical application scenario addressed in this paper. We note that we do not address the object recognition challenge in this work, but assume access to the object locations, and train intent classifiers to incorporate such information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 253, |
|
"text": "(Chen and Mooney, 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 372, |
|
"text": "(Chen et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 584, |
|
"text": "(Bunk et al., 2020;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 603, |
|
"text": "Chen et al., 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 622, |
|
"text": "Puig et al., 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 687, |
|
"text": "(Bunk et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Interfacing medical assistive technologies Traditional interfaces to assistive technologies involved manipulating joysticks (House et al., 2009) , or verbal commands which are restricted to simple templates. The latter include very simple templates (\"up\", \"down\", \"left\"; Pulikottil et al. (2018b)), or highly constrained training data sets based on command templates produced by five human annotators (Stepputtis et al., 2020) . In this paper, we leverage natural commands produced by thousands of crowd workers with the aim to produce a robust intent classifier amenable to natural speech input.", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 144, |
|
"text": "(House et al., 2009)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 427, |
|
"text": "(Stepputtis et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We leveraged and extended the ALFRED (Action Learning From Realistic Environments and Directives) dataset of visually grounded language commands (Shridhar et al., 2020) , for training and testing our intent classifier. ALFRED consists of more than 8,000 sets of scenes with unique environmental layout with a fixed set of associated movable and static objects. Each scene is paired with an indoor navigation task, and contains three levels of information: (1) positional information of the agent and objects, (2) natural language descriptions of the high-level task and low-level instructions to achieve the goal, and (3) a sequence of discrete actions to be performed by the agent to achieve the goal. An example is shown in Figure 2 . The visual task information comprises the positional (x, y, z) co-ordinates of the agent (Agent Information), and the positional information of static and interactable objects in the environment (Scene Information). The natural language annotation includes a \"high-level task\" describing the overall goal, as well as detailed low-level instructions (\"low-level subtasks\") on how to achieve the goal. Low level instructions were provided by at least three human annotators through crowdsourcing. Finally, each ALFRED task in the train and validation set is augmented with an \"action plan\" listing the sequence of actions (or intents) such as GoToLocation or PickUpObject required to achieve the goal in the context of the scene configuration ( Figure 2 , bottom). Crowd workers were prompted by these action plans, so that a goldstandard utterance-intent alignment could be derived from the data set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 168, |
|
"text": "(Shridhar et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 726, |
|
"end": 734, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 1480, |
|
"end": 1488, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We utilized a subset of the dataset corresponding to \"pick and place\" tasks, which is most relevant to our target application of humanoid arm control. We refer to the item that is to be picked up as \"target object\" and the item on which the picked-up object is to be placed as the \"receptacle object\". ALFRED contains around 3,000 different pick and place tasks, involving 58 unique target objects and 26 receptacle objects across 120 indoor scenes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ALFRED for intent classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Leveraging the ALFRED action plans, we could map all \"pick and place\" language commands to a combination of three unique subactions: GoToLocation 1 , PickUpObject and PutObject. GoToLocation actions referred to actions of the agent moving to a given location. PickUpObject and PutObject corresponded to the action of picking up the target object and placing the target object, respectively. Note that a single natural language directive can cover one or more atomic actions. We refer to com- ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ALFRED for intent classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "High Level Task \"Move the purple pillow from the couch to the black chair.\" Low Level Subtask 1 \"Turn right and walk up to the couch.\" Low Level Subtask 2 \"Pick up the purple pillow off of the couch.\" Low Level Subtask 3 \"Turn around and walk across the room, then hand a left and walk over to the black chair.\" Low Level Subtask 4 \"Put the purple pillow on the black chair.\" mands describing a single task as \"single intent\" (\"Pick up the keys.\"), and commands describing multiple tasks as \"multi-intent\" (\"Bring the keys from the chair to the table.\"\"). Table 1 illustrates the range of tasks and intents supported by the original ALFRED dataset and resulting training instances. In the original ALFRED data set, each low-level instruction was associated with a single intent (Table 1 middle ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 556, |
|
"end": 563, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 778, |
|
"end": 793, |
|
"text": "(Table 1 middle", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Language Information", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We augmented high-level task descriptions with intents by concatenating the actions of its associated low-level tasks (Table1 top). In addition, we augmented the ALFRED tasks with additional diverse and relevant scenarios to our assistive agent use case. First, we created partial tasks where the agent was required to execute only parts of the complete pick and place action sequence (e.g., only move to, and pick up the object). We synthesized these instances by concatenating all possible ordered subsequences of the low-level sub-tasks for a scenario and concatenating their corresponding natural language commands. The resulting instances were then treated as a single \"multi-intent\" directive (Table 1, bottom) . Second, we randomized the positions of the target and receptacle objects mentioned in the verbal commands to (1) far from the agent, (2) near the agent or (3) near the receptacle.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 699, |
|
"end": 716, |
|
"text": "(Table 1, bottom)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Language Information", |
|
"sec_num": null |
|
}, |
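For illustration, a minimal sketch (our own, with hypothetical variable names; not the released pipeline) of the subsequence-based augmentation described above, assuming each scenario provides parallel lists of low-level commands and their gold intents:

```python
from typing import List, Tuple

def synthesize_partial_tasks(subtask_commands: List[str],
                             subtask_intents: List[str]) -> List[Tuple[str, List[str]]]:
    """Create "multi-intent" instances from every contiguous, ordered
    subsequence of a scenario's low-level subtasks."""
    instances = []
    n = len(subtask_commands)
    for start in range(n):
        for end in range(start + 1, n + 1):
            command = " ".join(subtask_commands[start:end])
            instances.append((command, subtask_intents[start:end]))
    return instances

# Toy scenario with four low-level subtasks (commands shortened for brevity).
commands = ["Turn right and walk up to the couch.",
            "Pick up the purple pillow off of the couch.",
            "Walk over to the black chair.",
            "Put the purple pillow on the black chair."]
intents = ["GoToLocation", "PickUpObject", "GoToLocation", "PutObject"]
print(len(synthesize_partial_tasks(commands, intents)))  # 10 ordered subsequences
```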
|
{ |
|
"text": "Finally, we imposed physical constraints onto the agent, resembling the characteristics of an assistive robotic arm. In the original ALFRED, all objects within a specific distance of the agent are considered 'pickable'. We introduced a threshold (60 degrees) beyond which an object is unreachable and requires the agent to turn to the object first. We introduced a corresponding new action called RotateAgent that needed to be performed before the PickUpObject. In addition, we reduced the maximum reach distance of the agent to 0.5 meters and updated ALFRED tasks accordingly with GoToLocation actions before PickUpObject where necessary. The resulting dataset more realistically represented the physical constraints faced by real world entities, and the actions to be taken to meet the necessary preconditions to perform a task. We also handled cases where visual features corresponding to a language command were missing or irrelevant. For example, the command \"Take a step forward\", has a single intent GoToLocation when considering the natural language command alone. For such commands, we generated multiple data instances with randomized visual features to encourage the model to be insensitive to an irrelevant input modality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Information", |
|
"sec_num": null |
|
}, |
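As a concrete illustration, a minimal sketch of the pre-condition logic described above; the function and parameter names are ours, and the relative ordering of GoToLocation and RotateAgent is an assumption:

```python
ROTATE_THRESHOLD_DEG = 60.0  # beyond this angle the object is unreachable without turning
MAX_REACH_M = 0.5            # reduced maximum reach distance of the agent

def pre_actions(distance_to_target_m: float, angle_to_target_deg: float) -> list:
    """Actions that must precede PickUpObject under the constraints above."""
    actions = []
    if distance_to_target_m > MAX_REACH_M:
        actions.append("GoToLocation")
    if angle_to_target_deg > ROTATE_THRESHOLD_DEG:
        actions.append("RotateAgent")
    return actions + ["PickUpObject"]

print(pre_actions(1.2, 75.0))  # ['GoToLocation', 'RotateAgent', 'PickUpObject']
```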
|
{ |
|
"text": "We divided our final dataset into nonoverlapping training, testing and validation sets with no overlap in environments. We treated each unique action combination observed in the data as a distinct intent, leading to a total of 16 possible intents that could be selected in response to a spoken command.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Information", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2 Table 2 summarizes \"Turn right and walk up to the couch. Pick up the red pillow off the couch. Turn around and walk . . . to the chair. Put the red pillow on the chair.\"", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 20, |
|
"text": "Table 2 summarizes", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Language Information", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "{GoToLocation, PickUpObject, GoToLocation, PutObject } data set statistics are in Table 5 in the appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 89, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Language Information", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our intent classification model took vector representations of the language command and visual context as input and predicted the underlying intent as one of 16 classes. We briefly describe the representation schemes for scene and language input. Afterwards, we present our proposed model, which extended a state-of-the-art language intent classifier to handle both visual and language input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The visual data corresponding to a task instance in ALFRED dataset included the agent and object position information (Figure 2 , Agent and Scene information). We represented the visual information of each task as a 4-dimensional vector with elements corresponding to (i) The L2 (Euclidean) distance between agent and target object, (ii) L2 distance between agent and receptacle object, (iii) L2 distance between target and receptacle object and (iv) the angle between the target object and the direction the agent is facing initially.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 127, |
|
"text": "(Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visual Features", |
|
"sec_num": "4.1" |
|
}, |
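For illustration, a minimal sketch (ours, with hypothetical argument names) of how this 4-dimensional feature vector can be assembled from agent and object positions, assuming (x, y, z) co-ordinates with y as the vertical axis:

```python
import numpy as np

def visual_features(agent_xyz, agent_heading_deg, target_xyz, receptacle_xyz):
    """4-d vector: three pairwise L2 distances plus the angle between the
    agent's initial facing direction and the target object."""
    agent, target, receptacle = (np.asarray(p, dtype=float)
                                 for p in (agent_xyz, target_xyz, receptacle_xyz))
    d_agent_target = np.linalg.norm(agent - target)
    d_agent_receptacle = np.linalg.norm(agent - receptacle)
    d_target_receptacle = np.linalg.norm(target - receptacle)

    # Bearing of the target relative to the agent in the horizontal (x-z) plane,
    # wrapped to [0, 180] degrees relative to the agent's initial heading.
    offset = target - agent
    bearing = np.degrees(np.arctan2(offset[0], offset[2]))
    angle = abs((bearing - agent_heading_deg + 180.0) % 360.0 - 180.0)

    return np.array([d_agent_target, d_agent_receptacle, d_target_receptacle, angle])
```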
|
{ |
|
"text": "We transformed the language command to pretrained word embeddings (Pennington et al., 2014; Kenton and Toutanova, 2019; Peters et al., 2018) . Specifically, we use Tok2Vec embeddings provided by SpaCy. 3 We mapped each word in an input command to its corresponding embedding and obtained a representation for the entire command by averaging the word embeddings. Following (Bunk et al., 2020) we augment the embeddings with word-and character-level n-grams. DIET is a state of the art, natural language intent classification architecture developed for dialogue understanding tasks (Bunk et al., 2020) . DIET classifiers are attractive for application to assistive technologies because they can be trained rapidly and work well even with small datasets. The DIET classifier represents natural language inputs as described above (Sec 4.2). This input representation is passed through a neural network transformer architecture (Vaswani et al., 2017) which is a state-ofthe-art architecture for computing contextualized representations of input sequences. DIET is optimized to maximize the similarity between the final representation of the verbal command and an embedded representation of the true intent. We follow their optimization procedure, and at test time we predicted the intent with the closest predicted embedding to the gold label. We used the official implementation, with default parameters. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 91, |
|
"text": "(Pennington et al., 2014;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 92, |
|
"end": 119, |
|
"text": "Kenton and Toutanova, 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 140, |
|
"text": "Peters et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 203, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 391, |
|
"text": "(Bunk et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 599, |
|
"text": "(Bunk et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 923, |
|
"end": 945, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Features", |
|
"sec_num": "4.2" |
|
}, |
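As a concrete example, a minimal sketch (not the authors' pipeline) of the averaged word-embedding representation of a command using spaCy; the specific model name is an assumption:

```python
import numpy as np
import spacy

# Any spaCy model with token vectors will do; "en_core_web_md" is an assumption.
nlp = spacy.load("en_core_web_md")

def embed_command(command: str) -> np.ndarray:
    """Average the token vectors of a command into one fixed-size vector."""
    doc = nlp(command)
    return np.mean([token.vector for token in doc], axis=0)

vec = embed_command("Pick up the book and put it on the table")
print(vec.shape)  # (300,) for en_core_web_md
```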
|
{ |
|
"text": "We extended the DIET classifier to a multi-modal model (DIET-M) which predicted intents based on language and scene features. The language input was encoded exactly as in the original model. We then concatenated the output of the transformer along with the 4-dimensional numerical visual features and passed the result first through a 10% dropout layer, followed by two feed-forward layers of sizes 256 and 128 and finally through an output layer of size 40 to obtain a combined visual and language representation. ReLU was used as the activation function for all the feed-forward layers. This joint embedded representation was then used to identify the intents following DIET's original training objective, as described above.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-modal DIET", |
|
"sec_num": "4.4" |
|
}, |
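A minimal PyTorch sketch of the fusion head described above (a re-implementation sketch rather than the code used in the experiments; the transformer output dimension is a placeholder):

```python
import torch
import torch.nn as nn

class FusionHead(nn.Module):
    """Concatenate the transformer output with the 4-d visual features and map
    the result to the joint embedding used for intent matching."""
    def __init__(self, transformer_dim: int = 256, visual_dim: int = 4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Dropout(p=0.1),                             # 10% dropout
            nn.Linear(transformer_dim + visual_dim, 256),  # feed-forward layer 1
            nn.ReLU(),
            nn.Linear(256, 128),                           # feed-forward layer 2
            nn.ReLU(),
            nn.Linear(128, 40),                            # joint embedding of size 40
        )

    def forward(self, text_repr: torch.Tensor, visual_feats: torch.Tensor) -> torch.Tensor:
        return self.net(torch.cat([text_repr, visual_feats], dim=-1))

head = FusionHead()
print(head(torch.randn(8, 256), torch.randn(8, 4)).shape)  # torch.Size([8, 40])
```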
|
{ |
|
"text": "We present a series of experiments which assesses the impact of model complexity, multi-modal information as well as our data augmentation on final intent classification performance. This work focuses on robust multi-modal intent classification, and as such our experiments assume that the entity recognition and visual interpretation (such as object detection and location) have been solved externally. We discuss our contribution in the context of an end-to-end application Section 6.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We compare DIET and DIET-M against a Multi-Layer Perceptron (MLP) with a single hidden layer. Two variations of the MLP were tested: (1) MLP which takes as input only the embedded language representations; and (2) MLP-M which is provided with the embedded language representations concatenated with the visual features, resulting in a multi-modal variant. Rectified linear unit (ReLU) was used as the activation function and stochastic gradient descent (Ruder, 2016) was used to minimize a cross-entropy loss. The output of the final layer was passed through a soft-max layer to get the probability distribution across all possible intents. At test time, the intent with the highest probability score was predicted as the true intent associated with a command. We also report a simple majority class baseline, which labels all instances with the most prevalent class in the training set (GoToLocation).", |
|
"cite_spans": [ |
|
{ |
|
"start": 453, |
|
"end": 466, |
|
"text": "(Ruder, 2016)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.1" |
|
}, |
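For concreteness, a minimal PyTorch sketch of the MLP-M baseline under the stated design (single hidden layer, ReLU, SGD, cross-entropy); the embedding and hidden dimensions are assumptions:

```python
import torch
import torch.nn as nn

NUM_INTENTS = 16

class MLPM(nn.Module):
    """Single-hidden-layer MLP over [language embedding ; visual features]."""
    def __init__(self, lang_dim: int = 300, visual_dim: int = 4, hidden: int = 128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(lang_dim + visual_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, NUM_INTENTS),  # softmax applied via the loss / at test time
        )

    def forward(self, lang_emb, visual_feats):
        return self.net(torch.cat([lang_emb, visual_feats], dim=-1))

model = MLPM()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)   # stochastic gradient descent
loss_fn = nn.CrossEntropyLoss()                            # combines log-softmax and NLL
```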
|
{ |
|
"text": "We report micro-averaged accuracy, acknowledging the class imbalance in our data set, as well as precision recall and F1 measure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "5.2" |
|
}, |
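For illustration, a short scikit-learn sketch of the reported metrics on toy labels (the averaging choice mirrors the micro-averaged reporting above):

```python
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

y_true = [0, 3, 3, 7, 1]   # gold intent indices (toy example)
y_pred = [0, 3, 1, 7, 1]   # predicted intent indices

acc = accuracy_score(y_true, y_pred)
prec, rec, f1, _ = precision_recall_fscore_support(
    y_true, y_pred, average="micro", zero_division=0)
print(f"Ac={acc:.2f} Pr={prec:.2f} Re={rec:.2f} F1={f1:.2f}")
```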
|
{ |
|
"text": "Our experiments answered the following questions: (a) how important is the multi-modal (scene) input for accurate intent classification; (b) is a powerful contextual language encoding model necessary to achieve high intent classification performance; and (c) how does the training dataset augmentation impact performance with multi-intent commands? To answer the first question, we compared both machine learning models (DIET-M, MLP-M) against their unimodal language-only versions (DIET, MLP). To answer the second question, we compared the complex DIET classifier against the simpler MLP architecture, and a majority class baseline. Finally, the benefits of data augmentation were ascertained by testing DIET-M's performance on the same testing dataset after training on datasets with different levels of augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Powerful language encoders improve intent classification accuracy. Table 3 compares the performance of the majority class baseline (Majority), MLP and the DIET classifier. All models were trained and tested on the full, augmented data set. Unsurprisingly, we observed that all machine learning models outperformed the majority class Table 3 : Intent classification performance of the majority class baseline, multi-layer perceptron (MLP) and our DIET classifier in a unimodal and multi-modal setup (-M). We report accuracy (Ac), precision (Pr), recall (Re) and F1-measure.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 74, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 340, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "baseline. Furthermore, the variants of the DIET classifier consistently achieved a higher score than the simpler MLP (improvement of 5.6% absolute accuracy). Even though both models achieve F1 measures > 90%, very high language understanding performance is essential for user satisfaction in dialogue systems in general, and in assistive technology settings in particular. In addition, our evaluation adopted \"laboratory\" conditions, assuming noise-free entity and vision processing. With these arguments in mind, and recalling the fact that DIET is by design fast and efficient, we conclude that state-of-the-art language understanding architectures are preferable for situated intent classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Grounding language in visual context information improved intent classification performance. Table 3 compares multi-modal model variants (DIET-M, MLP-M) -with access to visual and language information -against their unimodal variants, which classify intents based on language commands only and remain agnostic about the visual surroundings. For both the MLP and DIET we observed a substantial improvement with added visual information. This is unsurprising, given the fact that navigational language commands are often high-level and can only be fully disambiguated in the context of the environment. As evidenced by the large performance gain of our multi-modal models over their language-only counterparts, both systems successfully learned to leverage the additional visual context for accurate intent interpretation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 100, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Data augmentation improved performance of DIET-M. We investigated the benefit of data augmentation on the best performing classifier (DIET- Table 4 : The performance of the DIET-M classifiers, trained on datasets with access to 0%, 10%, 50% or 100% of the augmented data. 100% (multi) tests only on the more challenging multi-intent subset of the test data. M) by ablating the amount of augmented training data available to the classifier during training. Specifically, we augment 0%, 10%, 50% or 100% of the original ALFRED instances with multi-subtask variations (as described in Section 3) Rows 1--4 in Table 4 show DIET-M performance trained on data sets with varying amounts of augmentation, and tested on the full, augmented test data. The model improved consistently with increased augmentation of the training data. Even a small amount of augmented data improved performance substantially, while more augmentation leads to diminishing returns. We finally analyzed specifically the benefit of data augmentation on understanding multi-intent commands, i.e., language commands which imply sequences of actions (bottom part of Table 1 ). To this end, we evaluated the classifier only on multi-intent commands. The result in the final row of Table 4 shows that the performance on these longer and more complex instances was practically on par with performance on the full test set, confirming that DIET-M successfully maps abstract comments to sequences of actions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 147, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 613, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1131, |
|
"end": 1138, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1245, |
|
"end": 1252, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We leveraged and extended a large-scale dataset of indoor navigation tasks to develop an intent classification component for robotic arm control to perform \"pick and place\" tasks. Our novel multimodal DIET classifier exceeded 98% in classification performance in an \"in vitro\" evaluation setup. We now discuss limitations of our work as well as future directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Toward end-to-end task completion. The intent classifier will be embedded in a larger system in order to enable end-to-end task completion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In our evaluation, we assumed that visual scene parsing (including object recognition and location) as well as entity recognition in the language had been solved perfectly and externally. In an ongoing project, the presented system is integrated with these components, leveraging the recent improvements and corresponding tools and frameworks powered by advances in machine learning, robotics and data sets (Liu et al., 2020; Zhu et al., 2020; Redmon and Farhadi, 2018) . This paper presented a highly accurate system which provides a strong foundation and promising starting point for endto-end integration as well as experiments under noisy conditions (e.g., malformed or ambiguous utterances, or speech recognition errors).", |
|
"cite_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 425, |
|
"text": "(Liu et al., 2020;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 443, |
|
"text": "Zhu et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 469, |
|
"text": "Redmon and Farhadi, 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Diversity of tasks and inputs Our study was constrained to \"pick-and-place\" tasks which (a) are conceptually straightforward and (b) are typically expressed in a fairly regular, formulaic manner. Even though the underlying ALFRED data set was diverse and somewhat noisy due to its crowdsourced nature, future work will extend our scenario to more complex tasks. ALFRED includes a variety of tasks beyond \"pick-and-place\" and can directly support this line of work. Our way of constructing multi-intent subtasks by concatenating low-level descriptions biased the data towards long descriptions and an underrepresentations of co-referential pronouns (e.g., \"Pick up the keys and put them in the bowl\"). Future work could leverage a mix of human data collection and natural language generation from language models to further augment the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The accuracy-flexibility trade-off. This work developed a highly accurate intent classifier motivated by the fact that efficient and reliable language understanding is paramount to effective humanrobot interaction. To achieve this, we limited the scenarios to a single task type as well as a simple but inflexible intent classification task: We exhaustively enumerated possible intents as 16 classes, thus preventing the model from meaningfully classifying an input that does not correspond to one of these categories. A more flexible system would predict a sequence of atomic intent labels of varying length. To this end, the task could be reframed as multi-label classification; or a sequenceto-sequence model could be developed to translate a natural language input into a sequence of intent labels. Analyzing the trade-off between reliability and flexibility in the context of robust multi-modal intent classification for assistive technologies is a fruitful direction for future research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This paper presented a multi-modal intent classifier for \"pick-and-place\"-tasks which takes diverse natural language commands as input, and which will be incorporated into a natural language interface of an assistive robotic arm. Our work will help to improve the naturalness of human-robot communication, which to-date often consists of mechanical (joystick) control or formulaic and templated language input. We showed how a large-scale naturalistic data set for general indoor navigation can be adapted to support training of a specific, highaccuracy intent classifier. We extended a state-ofthe-art natural language-based intent classifier to utilize both vision and language information. Our evaluation showed the effectiveness of our data augmentation, and the importance of multi-modal signal for our task. We hope that our work motivates a wider, cross-disciplinary use of large-scale naturalistic data sets -which are becoming more ubiquitous in the NLP and ML communities -as a valuable resource for developing flexible intelligent assistive technologies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "in analogy to a lateral or vertical movement of the robotic arm", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In addition to the 9 unique intents inTable 1, these are {PickUpObject, PutObject}, {RotateAgent,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://rasa.com/docs/rasa/reference/ rasa/nlu/classifiers/diet_classifier/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was supported by the ARC Industry Transformational Training Centre IC170100030 grant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Prevalence and causes of paralysis-united states", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Brian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Armour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Courtney-Long", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heidi", |
|
"middle": [], |
|
"last": "Fox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Fredine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cahill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "American journal of public health", |
|
"volume": "106", |
|
"issue": "10", |
|
"pages": "1855--1857", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brian S Armour, Elizabeth A Courtney-Long, Michael H Fox, Heidi Fredine, and Anthony Cahill. 2016. Prevalence and causes of paralysis-united states, 2013. American journal of public health, 106(10):1855-1857.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Intelligent assistive technologies", |
|
"authors": [ |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Barry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Dockery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Littman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Barry", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Presence: Teleoperators & Virtual Environments", |
|
"volume": "3", |
|
"issue": "3", |
|
"pages": "208--215", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philip Barry, John Dockery, David Littman, and Melanie Barry. 1994. Intelligent assistive technolo- gies. Presence: Teleoperators & Virtual Environ- ments, 3(3):208-215.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Natural language communication with robots", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "751--761", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Bisk, Deniz Yuret, and Daniel Marcu. 2016. Natural language communication with robots. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 751-761.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Diet: Lightweight language understanding for dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Tanja", |
|
"middle": [], |
|
"last": "Bunk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daksh", |
|
"middle": [], |
|
"last": "Varshneya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vlasov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Nichol", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.09936" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tanja Bunk, Daksh Varshneya, Vladimir Vlasov, and Alan Nichol. 2020. Diet: Lightweight language un- derstanding for dialogue systems. arXiv preprint arXiv:2004.09936.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning to interpret natural language navigation instructions from observations", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "25", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Chen and Raymond Mooney. 2011. Learning to interpret natural language navigation instructions from observations. In Proceedings of the AAAI Con- ference on Artificial Intelligence, volume 25.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Touchdown: Natural language navigation and spatial reasoning in visual street environments", |
|
"authors": [ |
|
{ |
|
"first": "Howard", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alane", |
|
"middle": [], |
|
"last": "Suhr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipendra", |
|
"middle": [], |
|
"last": "Misra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Snavely", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12538--12547", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Howard Chen, Alane Suhr, Dipendra Misra, Noah Snavely, and Yoav Artzi. 2019. Touchdown: Nat- ural language navigation and spatial reasoning in visual street environments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pat- tern Recognition, pages 12538-12547.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Slot-gated modeling for joint slot filling and intent prediction", |
|
"authors": [ |
|
{ |
|
"first": "Guang", |
|
"middle": [], |
|
"last": "Chih-Wen Goo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Kai", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chih-Li", |
|
"middle": [], |
|
"last": "Hsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Chieh", |
|
"middle": [], |
|
"last": "Huo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keng-Wei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Nung", |
|
"middle": [], |
|
"last": "Hsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "753--757", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2118" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chih-Wen Goo, Guang Gao, Yun-Kai Hsu, Chih-Li Huo, Tsung-Chieh Chen, Keng-Wei Hsu, and Yun- Nung Chen. 2018. Slot-gated modeling for joint slot filling and intent prediction. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Pa- pers), pages 753-757, New Orleans, Louisiana. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "ConveRT: Efficient and accurate conversational representations from transformers", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I\u00f1igo", |
|
"middle": [], |
|
"last": "Casanueva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Mrk\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pei-Hao", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Hsien", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2161--2174", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.196" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Henderson, I\u00f1igo Casanueva, Nikola Mrk\u0161i\u0107, Pei-Hao Su, Tsung-Hsien Wen, and Ivan Vuli\u0107. 2020. ConveRT: Efficient and accurate conversa- tional representations from transformers. In Find- ings of the Association for Computational Linguis- tics: EMNLP 2020, pages 2161-2174, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Arduino based voice controlled wheelchair", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tan Kian Hou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "In Journal of Physics: Conference Series", |
|
"volume": "1432", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tan Kian Hou et al. 2020. Arduino based voice con- trolled wheelchair. In Journal of Physics: Confer- ence Series, volume 1432, page 012064. IOP Pub- lishing.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The voicebot: a voice controlled robot arm", |
|
"authors": [ |
|
{ |
|
"first": "Brandi", |
|
"middle": [], |
|
"last": "House", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Malkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Bilmes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the SIGCHI Conference on Human Factors in Computing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "183--192", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brandi House, Jonathan Malkin, and Jeff Bilmes. 2009. The voicebot: a voice controlled robot arm. In Pro- ceedings of the SIGCHI Conference on Human Fac- tors in Computing Systems, pages 183-192.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Assistive device for physically challenged persons using voice controlled intelligent robotic arm", |
|
"authors": [ |
|
{ |
|
"first": "Sneha", |
|
"middle": [], |
|
"last": "Ripcy Anna John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Varghese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thankam", |
|
"middle": [], |
|
"last": "Sneha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K Martin", |
|
"middle": [], |
|
"last": "Shaji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sagayam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "806--810", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ripcy Anna John, Sneha Varghese, Sneha Thankam Shaji, and K Martin Sagayam. 2020. Assistive de- vice for physically challenged persons using voice controlled intelligent robotic arm. In 2020 6th Inter- national Conference on Advanced Computing and Communication Systems (ICACCS), pages 806-810. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT, pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Attention-based recurrent neural network models for joint intent detection and slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Lane", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "685--689", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2016-1352" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Liu and Ian Lane. 2016. Attention-based recur- rent neural network models for joint intent detection and slot filling. In Interspeech 2016, pages 685-689.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Deep learning for generic object detection: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanli", |
|
"middle": [], |
|
"last": "Ouyang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaogang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Fieguth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinwang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matti", |
|
"middle": [], |
|
"last": "Pietik\u00e4inen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International journal of computer vision", |
|
"volume": "128", |
|
"issue": "2", |
|
"pages": "261--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Liu, Wanli Ouyang, Xiaogang Wang, Paul Fieguth, Jie Chen, Xinwang Liu, and Matti Pietik\u00e4inen. 2020. Deep learning for generic object detection: A sur- vey. International journal of computer vision, 128(2):261-318.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Benchmarking natural language understanding services for building conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Xingkun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arash", |
|
"middle": [], |
|
"last": "Eshghi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawel", |
|
"middle": [], |
|
"last": "Swietojanski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Verena", |
|
"middle": [], |
|
"last": "Rieser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "10th International Workshop on Spoken Dialogue Systems Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xingkun Liu, Arash Eshghi, Pawel Swietojanski, and Verena Rieser. 2019. Benchmarking natural lan- guage understanding services for building conversa- tional agents. In 10th International Workshop on Spoken Dialogue Systems Technology 2019.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language process- ing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew E", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of NAACL-HLT, pages 2227-2237.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Virtualhome: Simulating household activities via programs", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Puig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Ra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marko", |
|
"middle": [], |
|
"last": "Boben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaman", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tingwu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8494--8502", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Puig, Kevin Ra, Marko Boben, Jiaman Li, Tingwu Wang, Sanja Fidler, and Antonio Torralba. 2018. Virtualhome: Simulating household activities via programs. In Proceedings of the IEEE Confer- ence on Computer Vision and Pattern Recognition, pages 8494-8502.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A voice control system for assistive robotic arms: preliminary usability tests on patients", |
|
"authors": [ |
|
{ |
|
"first": "Terrin", |
|
"middle": [], |
|
"last": "Babu Pulikottil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Caimmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria Grazia", |
|
"middle": [], |
|
"last": "D'Angelo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emilia", |
|
"middle": [], |
|
"last": "Biffi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefania", |
|
"middle": [], |
|
"last": "Pellegrinelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenzo Molinari", |
|
"middle": [], |
|
"last": "Tosatti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "7th IEEE International Conference on Biomedical Robotics and Biomechatronics (Biorob)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "167--172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terrin Babu Pulikottil, Marco Caimmi, Maria Grazia D'Angelo, Emilia Biffi, Stefania Pellegrinelli, and Lorenzo Molinari Tosatti. 2018a. A voice control system for assistive robotic arms: preliminary usabil- ity tests on patients. In 2018 7th IEEE International Conference on Biomedical Robotics and Biomecha- tronics (Biorob), pages 167-172. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A voice control system for assistive robotic arms: preliminary usability tests on patients", |
|
"authors": [ |
|
{ |
|
"first": "Terrin", |
|
"middle": [], |
|
"last": "Babu Pulikottil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Caimmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria Grazia", |
|
"middle": [], |
|
"last": "D'Angelo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emilia", |
|
"middle": [], |
|
"last": "Biffi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefania", |
|
"middle": [], |
|
"last": "Pellegrinelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenzo Molinari", |
|
"middle": [], |
|
"last": "Tosatti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "7th IEEE International Conference on Biomedical Robotics and Biomechatronics (Biorob)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "167--172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terrin Babu Pulikottil, Marco Caimmi, Maria Grazia D'Angelo, Emilia Biffi, Stefania Pellegrinelli, and Lorenzo Molinari Tosatti. 2018b. A voice control system for assistive robotic arms: preliminary usabil- ity tests on patients. In 2018 7th IEEE International Conference on Biomedical Robotics and Biomecha- tronics (Biorob), pages 167-172. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Yolov3: An incremental improvement", |
|
"authors": [ |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Redmon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.02767" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph Redmon and Ali Farhadi. 2018. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "An overview of gradient descent optimization algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.04747" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder. 2016. An overview of gradient descent optimization algorithms. arXiv preprint arXiv:1609.04747.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Alfred: A benchmark for interpreting grounded instructions for everyday tasks", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Shridhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Thomason", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gordon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Winson", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roozbeh", |
|
"middle": [], |
|
"last": "Mottaghi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dieter", |
|
"middle": [], |
|
"last": "Fox", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the IEEE/CVF conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10740--10749", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Shridhar, Jesse Thomason, Daniel Gordon, Yonatan Bisk, Winson Han, Roozbeh Mottaghi, Luke Zettlemoyer, and Dieter Fox. 2020. Alfred: A benchmark for interpreting grounded instructions for everyday tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recogni- tion, pages 10740-10749.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Language-conditioned imitation learning for robot manipulation tasks", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Stepputtis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Campbell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariano", |
|
"middle": [], |
|
"last": "Phielipp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chitta", |
|
"middle": [], |
|
"last": "Baral", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heni", |
|
"middle": [], |
|
"last": "Ben Amor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Stepputtis, Joseph Campbell, Mariano Phielipp, Stefan Lee, Chitta Baral, and Heni Ben Amor. 2020. Language-conditioned imitation learning for robot manipulation tasks. Advances in Neural Information Processing Systems, 33.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Sitthichai Chumsoongnern, Tanun Petthong, and Theera Leeudomwong", |
|
"authors": [ |
|
{ |
|
"first": "Sumet", |
|
"middle": [], |
|
"last": "Umchid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pitchaya", |
|
"middle": [], |
|
"last": "Limhaprasert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "11th Biomedical Engineering International Conference (BMEiCON)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--5", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sumet Umchid, Pitchaya Limhaprasert, Sitthichai Chumsoongnern, Tanun Petthong, and Theera Leeu- domwong. 2018. Voice controlled automatic wheelchair. In 2018 11th Biomedical Engineering International Conference (BMEiCON), pages 1-5. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Xiaobing Yuan, and Nasser Kehtarnavaz. 2020. A review of video object detection: Datasets, metrics and methods", |
|
"authors": [ |
|
{ |
|
"first": "Haidi", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoran", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baoqing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaobing", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nasser", |
|
"middle": [], |
|
"last": "Kehtarnavaz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Applied Sciences", |
|
"volume": "10", |
|
"issue": "21", |
|
"pages": "7834", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haidi Zhu, Haoran Wei, Baoqing Li, Xiaobing Yuan, and Nasser Kehtarnavaz. 2020. A review of video object detection: Datasets, metrics and methods. Ap- plied Sciences, 10(21):7834.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"text": "High-level overview of our intent classifier.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Visual and Language information corresponding to a pick and place task in ALFRED, as well as the associated Action Plan, i.e., sequence of actions (or intents), as provided in the the data set.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"text": "right and walk up to the couch.\" {GoToLocation } 3. \"Pick up the red pillow off the couch.\" {PickUpObject } 4. \"Turn around and walk. . . to the chair.\" {GoToLocation } 5. \"Put the red pillow on the chair.\" {PutObject } Low-level multi intent 6.\"Turn right and walk up to the couch. Pick up the red pillow off the couch.\" the red pillow off the couch. Turn around and walk . . . to the chair.\" and walk . . . to the chair. Put the red pillow on the chair.\" Turn right and walk up to the couch. Pick up the red pillow off the couch. Turn around and walk . . . to the chair.\" Pick up the red pillow off the couch. Turn around and walk . . . to the chair. Put the red pillow on the chair.\"", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"text": "4", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"content": "<table><tr><td>: Example high level multi-intent (1.), low-level single-intent (2.-5.) and low-level multi-intent (6.-7.)</td></tr><tr><td>tasks of type 'pick and place'. The model receives language commands (left col) together with relevant visual</td></tr><tr><td>information, and predicts an intent (right col). Top/middle are from the original ALFRED dataset. Bottom instances</td></tr><tr><td>from data augmentation.</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "Final data set statistics.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |