|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:58:03.929548Z" |
|
}, |
|
"title": "The Spoon is in the Sink: Assisting Visually Impaired People in the Kitchen", |
|
"authors": [ |
|
{ |
|
"first": "Katie", |
|
"middle": [], |
|
"last": "Baker", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Heriot-Watt University", |
|
"location": { |
|
"settlement": "Edinburgh", |
|
"country": "Scotland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Parekh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Heriot-Watt University", |
|
"location": { |
|
"settlement": "Edinburgh", |
|
"country": "Scotland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Adrien", |
|
"middle": [], |
|
"last": "Fabre", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Heriot-Watt University", |
|
"location": { |
|
"settlement": "Edinburgh", |
|
"country": "Scotland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Angus", |
|
"middle": [], |
|
"last": "Addlesee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Heriot-Watt University", |
|
"location": { |
|
"settlement": "Edinburgh", |
|
"country": "Scotland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ruben", |
|
"middle": [], |
|
"last": "Kruiper", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Heriot-Watt University", |
|
"location": { |
|
"settlement": "Edinburgh", |
|
"country": "Scotland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Lemon", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Heriot-Watt University", |
|
"location": { |
|
"settlement": "Edinburgh", |
|
"country": "Scotland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Visual Question Answering (VQA) systems are increasingly adept at a variety of tasks, and this technology can be used to assist blind and partially sighted people. To do this, the system's responses must not only be accurate, but usable. It is also vital for assistive technologies to be designed with a focus on: (1) privacy, as the camera may capture a user's mail, medication bottles, or other sensitive information; (2) transparency, so that the system's behaviour can be explained and trusted by users; and (3) controllability, to tailor the system for a particular domain or user group. We have therefore extended a conversational VQA framework, called Aye-saac, with these objectives in mind. Specifically, we gave Aye-saac the ability to answer visual questions in the kitchen, a particularly challenging area for visually impaired people. Our system 1 can now answer questions about quantity, positioning, and system confidence in regards to 299 kitchen objects. Questions about the spatial relations between these objects are particularly helpful to visually impaired people, and our system output more usable answers than other state of the art end-to-end VQA systems.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Visual Question Answering (VQA) systems are increasingly adept at a variety of tasks, and this technology can be used to assist blind and partially sighted people. To do this, the system's responses must not only be accurate, but usable. It is also vital for assistive technologies to be designed with a focus on: (1) privacy, as the camera may capture a user's mail, medication bottles, or other sensitive information; (2) transparency, so that the system's behaviour can be explained and trusted by users; and (3) controllability, to tailor the system for a particular domain or user group. We have therefore extended a conversational VQA framework, called Aye-saac, with these objectives in mind. Specifically, we gave Aye-saac the ability to answer visual questions in the kitchen, a particularly challenging area for visually impaired people. Our system 1 can now answer questions about quantity, positioning, and system confidence in regards to 299 kitchen objects. Questions about the spatial relations between these objects are particularly helpful to visually impaired people, and our system output more usable answers than other state of the art end-to-end VQA systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Visual impairment can lead to seemingly unrelated health issues. Specifically, malnutrition has been associated with visual impairment because of the difficulties encountered when shopping for, preparing, and eating food (Chung et al., 2021; Jones et al., 2019) . One major issue is that preparing a meal involves various situations where visually impaired people feel unsafe. A lack of spatial awareness and depth perception, especially when using a knife or preparing hot meals, contributes to concerns about getting injured.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 241, |
|
"text": "(Chung et al., 2021;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 261, |
|
"text": "Jones et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Another common concern is hygiene, e.g. the inability to read expiry dates, see dirt on vegetables, recognise mold on food or spot that meats are thoroughly cooked. As a result, most visually impaired people only prepare meals with the help from family members or carers -or simply do not prepare hot meals at all (Jones et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 334, |
|
"text": "(Jones et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this study, we explore the use of a Visual Question Answering (VQA) system to alleviate some of the issues that visually impaired people encounter in their kitchen. We describe Aye-saac, a voice assistant that can locate objects commonly found in a kitchen. The underlying architecture for Aye-saac was developed in 2020 by students at Heriot-Watt University and was designed to be both transparent and controllable, aligning with our needs. We extended the object detection capabilities from 30 to 299 types of kitchen objects. Further added functionality includes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. Object positioning in a scene, e.g. \"The spoon is in the sink\". We describe spatial relations to 'anchor' objects or a user's hands where possible -this avoids the use of other movable objects in the output, rendering the output useless, e.g. \"The spoon is next to the fork\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2. Handling queries on the quantity of objects within a scene, e.g. \"I count one carrot and two fish\". This is particularly useful for counting ingredients.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "3. Transparency of confidence scores for responses generated by Aye-saac, e.g. \"I am 72% certain that the spoon is in the sink\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We investigate how well Aye-saac handles the newly added user-intents, how well it detects the newly added kitchen objects, and compare the object detection capabilities against two dedicated end-to-end (E2E) VQA systems. We show that Aye-saac outperforms these two E2E systems when queried about object positioning by returning more descriptive and usable positioning information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Related Work", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A VQA system takes a natural language question and an image as input, aiming to reason over the contents and respond with a natural language utterance (Antol et al., 2015) . Recently, VQA systems that achieve state-of-the-art results have been trained E2E, an example is the Pythia system that won the 2018 VQA competition (Jiang et al., 2018) . While these systems provide improved performance, they lack the ability to explain why they generated a specific response and have to be trained on large datasets. A crowd-sourcing approach can provide VQA systems with the required training data to cover many subjects and handle common questions (Gurari et al., 2018) , but the objects or properties in an image must be within this data to be mapped correctly between the query and image (Antol et al., 2015) . As an example, consider Figure 1: if the model was not trained to recognise bananas, the system is unable to correctly handle queries related to bananas. This is a challenge when working within a very specific domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 171, |
|
"text": "(Antol et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 343, |
|
"text": "(Jiang et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 664, |
|
"text": "(Gurari et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 785, |
|
"end": 805, |
|
"text": "(Antol et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visual Question Answering", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The VizWiz Social (Brady et al., 2013) app was used by visually impaired people to collect pictures and questions about their surroundings. The resulting VizWiz dataset (Gurari et al., 2018) contains these images and questions with answers crowdsourced from sighted volunteers. Although the kitchen-specific subset is not large enough to train an E2E system upon, this study provides useful insight into the types of questions that are commonly asked by visually impaired people, and the kitchen was identified as a particularly challenging area. We used this dataset to direct our work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 38, |
|
"text": "(Brady et al., 2013)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 190, |
|
"text": "(Gurari et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visual Question Answering", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, there is no dataset available to train a VQA system that assists vi-sually impaired users in a kitchen setting. Some existing datasets to train VQA systems contain subsets of data that are situated in the kitchen, such as Embodied Questioning Answering (EQA) (Das et al., 2018) and Interactive Question Answering Dataset (IQUAD) (Gordon et al., 2018) . However, both EQA and IQUAD use computer-generated questions that are grounded on a synthetic 3D environment. Synthetic visual scenes do not have the randomness of real-life (Hudson and Manning, 2019) which biases the model towards certain environments. Questions also lack diversity due to a templative generation method, biasing the model further (Das et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 307, |
|
"text": "(Das et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 380, |
|
"text": "(Gordon et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 732, |
|
"end": 750, |
|
"text": "(Das et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visual Question Answering", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The lack of a domain-specific dataset complicates the use of an E2E approach (Zhao et al., 2019) . Instead, Aye-saac only relies on neural models for object detection and Natural Language Understanding (NLU). A rule-based approach is used to process the image and query, and to formulate a response in real-time. This makes it possible to tweak and extend Aye-saac in a controlled manner. Additionally, if a question is answered incorrectly, the system can provide reason for the response.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 96, |
|
"text": "(Zhao et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visual Question Answering", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Current object detection systems perform well at detecting entities, but are not robust at inferring the spatial relationships between them (Krishna et al., 2017) . This weakness stems from the available datasets, as they lack relative spatial positioning information and must implicitly infer them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 162, |
|
"text": "(Krishna et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Spatial relationships", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "To address this limitation, Krishna et al. (2017) introduced the Visual Genome (VG) dataset, converting natural language descriptions of images to dense scene graphs that include spatial relationships and common descriptive attributes of entities. Datasets such as CompGuessWhat?! (CGW) (Suglia et al., 2020) and GQA (Hudson and Manning, 2019) extend object detection datasets by including dense scene graphs that contain additional situational and abstract attributes, and further include binary question-answer pairs grounded on the context of the scene (Suglia et al., 2020; Hudson and Manning, 2019) . However, models trained on these datasets are not as robust with zero-shot evaluation -where models attempt to reason about visual scenes with previously unseen entities (Suglia et al., 2020) , which can be dangerous in practice for visually impaired people who need to rely on the response.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 49, |
|
"text": "Krishna et al. (2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 577, |
|
"text": "(Suglia et al., 2020;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 578, |
|
"end": 603, |
|
"text": "Hudson and Manning, 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 776, |
|
"end": 797, |
|
"text": "(Suglia et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Spatial relationships", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "There are a variety of commercial systems built to assist people with visual impairments. Some are applications, and others include specific hardware for the user. These systems fall into two categories: human-in-the-loop, or E2E. Human-in-the-loop systems connect visually impaired people to a volunteer, or staff member, that is ready to answer visual questions. Examples include: BeMyEyes, BeSpecular, and Aira -with varying costs and wait times. These systems are very time-efficient thanks to the ability to have a dialogue, hence our focus on handling conversational utterances and follow-up questions. Humanin-the-loop systems also enable people with visual impairments to ask questions that involve artistic, cultural, or timely importance -like asking about Banksy's work -which E2E approaches cannot do (Fleet et al., 2020) . There are several issues with this approach however, the major one being user privacy. The more affordable, and often free, services require untrained volunteers that receive images taken by visually impaired people. This is a huge security concern as the images may contain the user's name, address, medication, or children's photos in identifiable uniform (Fleet et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 813, |
|
"end": 833, |
|
"text": "(Fleet et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1194, |
|
"end": 1214, |
|
"text": "(Fleet et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Assistants for Sight Impaired People", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "E2E systems, like TapTapSee and Microsoft Seeing AI are cloud services, so they do not have this privacy concern to the same extent. This concern becomes negligible if the system is open-source and can be set up at home, or keeps data on a device like OrCam MyEye. These E2E systems do also have their flaws however. They lack the mentioned ability to have a dialogue or understand culture, but more importantly, they cannot provide feedback on certainty. This makes it impossible to know whether the output is accurate. Similarly, it is very resource-intensive to tweak or extend E2E models, and a challenge to control specific behaviours (Samek et al., 2019) . For example, it would be beneficial for the E2E system to be more cautious when answering questions about medication. This of course is a balance with human-in-the-loop systems that would answer accurately, but provide a stranger with medical information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 640, |
|
"end": 660, |
|
"text": "(Samek et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Assistants for Sight Impaired People", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "All of these systems offer general assistance to people with visual impairments, whereas we are concentrating on the kitchen domain. A system was recently developed which focused on improving the mealtime experience of visually impaired people after surveying them about their mealtime experiences (Chung et al., 2021) . A virtual reality (VR)-based prototype was created to address a major issue that visually impaired people experienced: getting information about the location of food. The study also highlighted the anxiety faced by people with visual impairments about disturbing others for information or assistance. An automated system, such as Aye-saac, reduces the reliance on sighted people for kitchen and food-related tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 298, |
|
"end": 318, |
|
"text": "(Chung et al., 2021)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Assistants for Sight Impaired People", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "A dialogue system requires that an utterance, represented as text, is transformed into a meaningful representation for the Dialogue Manager (DM), thus enabling the formulation of a relevant response. In the context of conversational agents, this is known as NLU, which often refers to both identifying the intent of a user's input, and identifying which entities have been mentioned (Bunk et al., 2020) . To reduce error-propagation between these sub-tasks, a multi-task architecture has been proposed called the Dual Intent Entity Transformer (DIET) classifier (Bunk et al., 2020) . The joint modelling of the entity extraction and intent classification sub-tasks has been shown to improve performance, indicating that intent and entities closely interact with each other. Beyond improved accuracy, this model is faster to train than fine-tuning BERT for NLU.", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 402, |
|
"text": "(Bunk et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 581, |
|
"text": "(Bunk et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Understanding", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Aye-saac relies on Rasa's implementation of the DIET classifier to achieve state-of-the-art NLU results. Rasa is a set of open-source libraries that can be used to create conversational agents (Bocklisch et al., 2017) , and perform on par with paid NLU system, such as Microsoft's Language Understanding (LUIS) (Braun et al., 2017) . Rasa offers the typical advantages of self-hosted open-source software such as adaptability and data control. This privacy is particularly important when interacting with visually impaired people in their homes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 217, |
|
"text": "(Bocklisch et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 331, |
|
"text": "(Braun et al., 2017)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Understanding", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Aye-saac is a modular and extensible conversational VQA framework that is implemented as a collection of independent microservices. Isolating each service allows Aye-saac to utilise concurrency when analysing image data; ensuring more intensive operations do not hinder the system from responding to other requests. Figure 2 shows the flow of data between all the individual services, using RabbitMQ to control their communication. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 324, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Aye-saac", |
|
"sec_num": "3" |
|
}, |
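As a rough illustration of the microservice pattern described above, the following Python sketch shows how one Aye-saac-style service could consume requests from a RabbitMQ queue via the pika client and forward its result to the next service. The queue names ("object_detection", "interpreter") and the message shape are hypothetical and are not taken from the Aye-saac repository.

```python
# Minimal sketch of one Aye-saac-style microservice: consume a request from an
# input queue, do some work, and publish the result to the next service's queue.
# Queue names and the JSON message shape are hypothetical illustrations.
import json
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.queue_declare(queue="object_detection")   # this service's inbox
channel.queue_declare(queue="interpreter")        # downstream service's inbox

def handle_request(ch, method, properties, body):
    request = json.loads(body)
    # Placeholder for the real work (e.g. running the object detection models).
    request["objects"] = [{"label": "spoon", "confidence": 0.72}]
    ch.basic_publish(exchange="", routing_key="interpreter",
                     body=json.dumps(request))

channel.basic_consume(queue="object_detection",
                      on_message_callback=handle_request, auto_ack=True)
channel.start_consuming()
```

Because each service only reads from its own queue and publishes to the next, intensive steps such as object detection can run concurrently without blocking the rest of the pipeline.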
|
{ |
|
"text": "Some of Aye-saac's limitations within a kitchen environment were caused by its inability to detect common kitchen objects. Specifically, object detection was performed by a Single Shot Detector (SSD) with ResNet50 trained on the COCO dataset (Lin et al., 2020) . The COCO dataset contains 80 object classes, of which we would only expect 30 to be commonly found in a kitchen. Examples of irrelevant classes include 'traffic light' and 'giraffe'.", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 260, |
|
"text": "(Lin et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Suitability for the Kitchen", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To improve Aye-saac's suitability in the kitchen, we combined the existing model with a baseline Faster R-CNN model, trained on the Epic-Kitchens-55 (EK) dataset (Damen et al., 2020) . The latter model can identify 290 distinct objects that are commonly found in a kitchen. By combining both models Aye-saac is able to detect 299 kitchen-relevant object classes: 21 classes occur in both datasets, 9 are unique to COCO, and 269 are unique to EK. We retained the COCO model due to its superior accuracy (see Section 5.2).", |
|
"cite_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 182, |
|
"text": "(Damen et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Suitability for the Kitchen", |
|
"sec_num": "3.1" |
|
}, |
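A minimal sketch of how detections from the COCO-trained and EK-trained models might be combined, preferring the COCO model's predictions for classes both models cover (given its higher observed accuracy, Section 5.2). The detection dictionaries, confidence threshold, and merge policy are illustrative assumptions rather than Aye-saac's actual code.

```python
# Sketch: merge detections from the COCO-trained and EPIC-KITCHENS-trained models.
# Detections are assumed to be dicts with a label, confidence, and bounding box;
# this is an illustrative merge policy, not the exact one used in Aye-saac.
def merge_detections(coco_dets, ek_dets, min_confidence=0.5):
    merged = [d for d in coco_dets if d["confidence"] >= min_confidence]
    coco_labels = {d["label"] for d in merged}
    for det in ek_dets:
        # Prefer COCO for classes both models cover; EK fills in the rest.
        if det["label"] not in coco_labels and det["confidence"] >= min_confidence:
            merged.append(det)
    return merged

coco = [{"label": "spoon", "confidence": 0.91, "box": (40, 60, 120, 140)}]
ek = [{"label": "spoon", "confidence": 0.55, "box": (42, 58, 118, 142)},
      {"label": "colander", "confidence": 0.81, "box": (200, 80, 320, 210)}]
print(merge_detections(coco, ek))  # spoon from COCO, colander from EK
```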
|
{ |
|
"text": "One issue for visually impaired users is knowing whether they have enough ingredients for a specific recipe (Gurari et al., 2018) . Therefore, we introduced functionality to allow users to query the number of objects in a visual scene. The object detection only returns labels in singular form so we added a plural management feature to Aye-saac's NLU service. Therefore, \"How many eggs are there?\" can now be successfully answered and this plural management is extended across all existing intents related to object detection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 129, |
|
"text": "(Gurari et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Querying the Quantity", |
|
"sec_num": "3.2" |
|
}, |
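A small sketch, under assumed data structures, of the plural-handling idea: the detector emits singular labels, so a queried plural such as "eggs" is normalised before counting. The irregular-plural table and suffix rules below are simplified placeholders, not the rules used in Aye-saac's NLU service.

```python
# Sketch of plural handling for quantity questions: detector labels are singular,
# so a queried plural ("eggs") is normalised before counting detections.
IRREGULAR = {"knives": "knife", "loaves": "loaf", "fish": "fish"}

def singularise(noun):
    noun = noun.lower()
    if noun in IRREGULAR:
        return IRREGULAR[noun]
    if noun.endswith(("ches", "shes", "xes", "sses")):
        return noun[:-2]                       # e.g. "boxes" -> "box"
    if noun.endswith("s") and not noun.endswith("ss"):
        return noun[:-1]                       # e.g. "eggs" -> "egg"
    return noun

def count_objects(query_noun, detections):
    target = singularise(query_noun)
    return sum(1 for d in detections if d["label"] == target)

detections = [{"label": "egg"}, {"label": "egg"}, {"label": "carrot"}]
print(count_objects("eggs", detections))  # -> 2
```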
|
{ |
|
"text": "The detection of objects can be imprecise and it is critical to communicate this uncertainty to visually impaired users. For example, if the confidence regarding the number of eggs on the table is only 51%, the user could ask where the eggs are to manually count them. We therefore implemented a new intent that allows users to ask Aye-saac how confident it is about an object it detected. The response reports the image classification score from the object detection model as the confidence, as shown in Figure 3 . It is important to note that the system could be 100% confident, but still be wrong.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 505, |
|
"end": 513, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantifying Confidence", |
|
"sec_num": "3.3" |
|
}, |
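To illustrate how a confidence query could be served from stored detection scores, here is a minimal sketch; the state layout and the response wording are assumptions made for illustration only.

```python
# Sketch: answer a "how sure are you?" follow-up from stored detection scores.
# The state structure and wording are illustrative, not Aye-saac's actual code.
last_detections = {"spoon": 0.72, "sink": 0.88}  # label -> detector score

def answer_confidence(label, state):
    if label not in state:
        return f"I did not detect a {label} in the last image."
    return f"I am {round(state[label] * 100)}% certain about the {label}."

print(answer_confidence("spoon", last_detections))
# -> "I am 72% certain about the spoon."
```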
|
{ |
|
"text": "Visually impaired people experience issues with locating food during mealtimes and, when given assistance options, prefer to have dish locations verbally described (Chung et al., 2021) . We therefore developed spatial detection functionality to provide usable positioning information in relation to other objects. We created a list of 34 object classes taken from the COCO and EK datasets that we name 'anchors' -corresponding to large items that are not expected to move very often, and thus the user will likely already know where they are, e.g. a kitchen sink or an oven. Additionally, we specify the expected relationships between an anchor and a query object from a list of prepositions; for example, a fridge has the spatial relationships 'in', 'on' and 'next to' but not 'below'. We prioritise positioning of query objects in relation to these anchors but in the absence of anchors, we attempt to give the position relative to people or hands that were found in the picture. Failing this, we return the absolute position of the object in the image.", |
|
"cite_spans": [ |
|
{ |
|
"start": 164, |
|
"end": 184, |
|
"text": "(Chung et al., 2021)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Spatial Detection", |
|
"sec_num": "3.4" |
|
}, |
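The anchor-first strategy above can be sketched as follows. The anchor list, allowed prepositions, and the bounding-box overlap heuristic are simplified assumptions; Aye-saac's actual rules are richer (34 anchor classes with per-anchor preposition sets).

```python
# Sketch of the anchor-first positioning strategy described above. Boxes are
# (x_min, y_min, x_max, y_max); the anchor list, allowed prepositions, and the
# overlap heuristic are simplified illustrations, not the real Aye-saac rules.
ANCHORS = {"sink": {"in", "next to"}, "oven": {"on", "next to"},
           "fridge": {"in", "on", "next to"}}

def overlap(a, b):
    return not (a[2] < b[0] or b[2] < a[0] or a[3] < b[1] or b[3] < a[1])

def describe_position(query, query_box, detections, image_width):
    # 1. Prefer a large, static "anchor" object the user already knows.
    for det in detections:
        if det["label"] in ANCHORS and overlap(query_box, det["box"]):
            prep = "in" if "in" in ANCHORS[det["label"]] else "next to"
            return f"The {query} is {prep} the {det['label']}."
    # 2. Otherwise fall back to people or hands found in the scene.
    for det in detections:
        if det["label"] in {"person", "hand"}:
            side = "left" if query_box[0] < det["box"][0] else "right"
            return f"The {query} is to the {side} of the {det['label']}."
    # 3. Finally, give an absolute position in the image.
    centre_x = (query_box[0] + query_box[2]) / 2
    third = "left" if centre_x < image_width / 3 else (
        "right" if centre_x > 2 * image_width / 3 else "centre")
    return f"The {query} is in the {third} of the picture."

dets = [{"label": "sink", "box": (0, 0, 200, 200)}]
print(describe_position("spoon", (50, 50, 90, 120), dets, image_width=640))
# -> "The spoon is in the sink."
```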
|
{ |
|
"text": "We use a small sample of the VizWiz images and questions to evaluate the performance of the object detection and VQA (Gurari et al., 2018) . Specifically, we used the VizWiz Dataset Browser (Bhattacharya and Gurari, 2019) to select images that are labelled as: suitable for object recognition, good quality, in the kitchen, with 10/10 confident answers. This gave a final evaluation set of 18 images with associated questions, asked by visually impaired people, and high confidence answers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 138, |
|
"text": "(Gurari et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 221, |
|
"text": "Gurari, 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation of Object Detection and VQA", |
|
"sec_num": "4.1" |
|
}, |
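For illustration, the selection criteria could be expressed as a simple filter. Note that the authors selected images interactively through the VizWiz Dataset Browser rather than with code, and the annotation field names below are hypothetical stand-ins for the VizWiz metadata.

```python
# Hypothetical filter mirroring the selection criteria above; the field names
# are illustrative, since the actual selection used the VizWiz Dataset Browser.
def select_eval_items(annotations):
    return [a for a in annotations
            if a["recognisable"]               # suitable for object recognition
            and a["good_quality"]              # image quality flag
            and a["scene"] == "kitchen"        # kitchen setting
            and a["confident_answers"] == 10]  # all 10 answers marked confident

sample = [{"image": "VizWiz_0001.jpg", "recognisable": True, "good_quality": True,
           "scene": "kitchen", "confident_answers": 10}]
print(len(select_eval_items(sample)))  # -> 1
```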
|
{ |
|
"text": "We used these images as input to the COCO and EK models and rated the resulting bounding boxes as correct or incorrect. We used the same images and questions to evaluate the VQA capabilities of Aye-saac and two E2E VQA models; Pythia (Jiang et al., 2018) and HieCoAtt (Lu et al., 2016) . For all three systems, we compared the generated answers to the human provided answers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 254, |
|
"text": "(Jiang et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 285, |
|
"text": "(Lu et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation of Object Detection and VQA", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To evaluate the spatial relations, we first generated a small test set of image-question pairs from the GQA dataset (Hudson and Manning, 2019). Using the scene graphs from the training data, we gathered all the colour images that had: the word \"kitchen\" in at least one of their scene objects, the semantic type 'relation', and began with the word \"where\". This gave a set of 18 image-question pairs which were asked to Aye-saac, Pythia, and HieCoAtt. The outputs were examined and rated as correct or incorrect and the usability of the spatial relationships were judged by the authors as usable or not in terms of whether the output could be used by a visually impaired user to locate the query object.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation of Spatial Relationships", |
|
"sec_num": "4.2" |
|
}, |
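A comparable filter over the GQA training data might look like the sketch below; the field names ("objects", "name", "types", "semantic") are simplified approximations of the GQA question and scene-graph format and should be treated as assumptions.

```python
# Sketch of selecting kitchen-related "where" questions from GQA; the scene-graph
# and question field names are approximations used only to illustrate the
# filtering criteria described in the text.
def select_pairs(questions, scene_graphs):
    pairs = []
    for qid, q in questions.items():
        graph = scene_graphs.get(q["imageId"], {"objects": {}})
        has_kitchen = any("kitchen" in obj["name"]
                          for obj in graph["objects"].values())
        is_relation = q["types"]["semantic"] == "rel"   # assumed encoding
        starts_where = q["question"].lower().startswith("where")
        if has_kitchen and is_relation and starts_where:
            pairs.append((q["imageId"], q["question"]))
    return pairs
```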
|
{ |
|
"text": "Following Rasa's evaluation guidelines, we generated the confusion matrix in Figure 4 . Our NLU performs well but we can deduce that the intents 'identify' and 'read text' are mistaken several times. This can be improved with more training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 85, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "NLU", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Aye-saac relies on two pre-trained object detection models, the COCO model and the EK model. While the EK model is able to detect a much larger number of kitchen-specific objects than the COCO model, we found that the COCO model performed best on the image-question pairs detailed in Section 4.1. COCO identified 89.3% bounding boxes correctly, whereas the EK model identified 28.6%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Object Detection", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "There is a need for an object detection model that is able to accurately detect a large number of kitchen-specific objects. Currently the baseline EK model has been used in Aye-saac because the better performing models have not been released publicly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Object Detection", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Using the image-question pairs detailed in section 4.2 we tested how well Aye-saac determines the spatial relationships between queried objects. We rated the answers in terms of their correctness and usability. Overall, 77.8% of the answers were answered correctly and 66.7% of answers were deemed to be usable. 77.8% were answered using positioning relative to anchor objects, of which 71.4% were correct. Two of the questions were answered using positioning relative to people in the scene (100% answered correctly). The remaining questions were answered using absolute positioning in the image (100% answered correctly). The main reason for incorrect answers was multiple query bounding boxes resulting in stilted responses, e.g. \"I can see a sink and it's in the sink and a sink and it's left of the sink\". In one case the perspective of the objects was incorrectly interpreted, a microwave described as 'on' the dining table instead of 'in front of \" the dining table.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Spatial Relationships", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We compared Aye-saac with the E2E systems, Pythia and HieCoAtt, see Table 1 for results. Both the E2E systems returned answers that were not usable, e.g. describing an objects position in relation to the \"counter\" or \"kitchen\". While technically the objects were indeed in a kitchen or on a counter, these answers do not help locate the query objects in the scene. Following this criterion, Pythia responded to 27.8% of the spatial relation questions usefully, all for microwave locations \"above stove\", and HieCoAtt only gave usable answers to 5.6% of the questions. Aye-saac provides more detailed positions than the E2E systems by relating the position of objects to identified anchor points. The position detection could be further improved by accounting for multiple bounding boxes, e.g. in one case the location is reported in relation to four bounding boxes, one or two may be sufficient. Hardware changes could also improve the system, an addition of a Microsoft Kinect or stereo vision camera may help overcome depth perception issues. The camera could be placed at a high vantage point, but the object detection would then have trouble identifying objects as the EK model was trained on an egocentric dataset (Damen et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1219, |
|
"end": 1239, |
|
"text": "(Damen et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 75, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Spatial Relationships", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Using the VizWiz image-question pairs detailed in section 4.1, we compared Aye-saac with two E2E systems -Pythia and HieCoAtt. When looking at accuracy alone, Pythia performed the best and answered 50% of the questions correctly, followed by HieCoAtt with 44%, and Aye-saac with 33%. Figure 5A illustrates that Aye-saac currently suffers from a lack of deeper understanding. Here, a picture of an apple with the question \"Is this a fruit or a vegetable?\" is answered incorrectly as Ayesaac does not understand that an apple is a fruit. The E2E systems however are able to answer the question correctly. To enable this understanding, rule-based VQA systems like Aye-saac could be integrated with large Knowledge Bases (KBs) and ontologies on particular topics, and common sense knowledge. Some of these are very large and actively developed by communities of experts in the KBs respective domain. A few cross-domain examples include Wikidata (Vrande\u010di\u0107 and Kr\u00f6tzsch, 2014) , ConceptNet (Liu and Singh, 2004) , and DBpedia (Auer et al., 2007) ; all part of the linked open data cloud (Auer et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 941, |
|
"end": 971, |
|
"text": "(Vrande\u010di\u0107 and Kr\u00f6tzsch, 2014)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 985, |
|
"end": 1006, |
|
"text": "(Liu and Singh, 2004)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1021, |
|
"end": 1040, |
|
"text": "(Auer et al., 2007)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1082, |
|
"end": 1101, |
|
"text": "(Auer et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 284, |
|
"end": 293, |
|
"text": "Figure 5A", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "General VQA", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "A benefit of Aye-saac over the E2E systems is support for multi-turn dialogue so that users can query the confidence of given answers. Aye-saac achieves this by temporarily storing detected objects in its state. This feature could be expanded to support additional follow up questions, e.g. \"What is this?\" followed by \"and what colour is it?\". Aye-saac's modular design enables the addition of functionalities, like textual VQA (Ramil Brick et al., 2021) , while E2E systems require retraining to cover additional objects or user queries. Figure 5 illustrates another difficulty answering object positioning questions, determining the 3D position of objects with 2D images. Due to this, Aye-saac responds that the microwave is \"on the oven and in the cupboard\". Distinguishing between 'on' and 'in' is a complex semantic issue (Coventry et al., 2001; Richard-Bollans et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 429, |
|
"end": 455, |
|
"text": "(Ramil Brick et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 851, |
|
"text": "(Coventry et al., 2001;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 852, |
|
"end": 881, |
|
"text": "Richard-Bollans et al., 2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 540, |
|
"end": 548, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "General VQA", |
|
"sec_num": "5.4" |
|
}, |
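A minimal sketch of the multi-turn idea: detected objects are kept in a small dialogue state so that a follow-up like "and what colour is it?" can be resolved against the last-mentioned object. The state layout, intent matching, and response wording are illustrative assumptions, not Aye-saac's implementation.

```python
# Sketch of how a follow-up like "and what colour is it?" could be resolved
# against a stored dialogue state; the state layout is assumed for illustration.
state = {"last_object": None}

def answer(question, detections):
    q = question.lower()
    if "what is this" in q and detections:
        state["last_object"] = detections[0]
        return f"It looks like a {detections[0]['label']}."
    if "what colour is it" in q and state["last_object"]:
        colour = state["last_object"].get("colour", "unknown")
        return f"The {state['last_object']['label']} is {colour}."
    return "Sorry, I am not sure."

dets = [{"label": "mug", "colour": "blue"}]
print(answer("What is this?", dets))           # -> "It looks like a mug."
print(answer("And what colour is it?", dets))  # -> "The mug is blue."
```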
|
{ |
|
"text": "Blind and partially sighted people face many challenges in the kitchen. We presented our version of Aye-saac, a conversational VQA framework that aims to start tackling some of these challenges.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "While our system can still be improved, we have shown that Aye-saac: (1) provides more usable responses than Pythia and HieCoAtt when asked for an object's location; (2) is transparent in design and also when detailing its own confidence of a previous response; (3) can have a multi-turn interaction; (4) still has an accurate NLU intent classifier with the added functionality and plural handling; and (5) can answer questions about 299 kitchen objects.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We had planned to run a user evaluation with visually impaired people in an accessible kitchen. Unfortunately due to COVID, we had to cancel this. In future work we could train a more accurate kitchen-specific object detection model on the EK dataset, integrate common-sense knowledge using KGs, and more. Once complete, we hope that a human evaluation could take place.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Code and evaluation data can be found at: https://github.com/Aye-saac/aye-saac", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We couldn't have done this work without Julian Frabel, Noe Rivals, Guillaume Jeanne, Rachel Yu, Basile Lamarque, and Nejma Belkhanfar. The fourth author is funded by Wallscope and The Data Lab. Thank you all.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "7" |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "VQA: Visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Stanislaw", |
|
"middle": [], |
|
"last": "Antol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aishwarya", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiasen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE International Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2425--2433", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICCV.2015.279" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Mar- garet Mitchell, Dhruv Batra, C. Lawrence Zitnick, and Devi Parikh. 2015. VQA: Visual question an- swering. Proceedings of the IEEE International Con- ference on Computer Vision, pages 2425-2433.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Dbpedia: A nucleus for a web of open data", |
|
"authors": [ |
|
{ |
|
"first": "S\u00f6ren", |
|
"middle": [], |
|
"last": "Auer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Kobilarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Cyganiak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Ives", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "The semantic web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "722--735", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00f6ren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. 2007. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Linked Open Data-Creating Knowledge Out of Interlinked Data: Results of the LOD2 Project", |
|
"authors": [ |
|
{ |
|
"first": "S\u00f6ren", |
|
"middle": [], |
|
"last": "Auer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volha", |
|
"middle": [], |
|
"last": "Bryl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Tramp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "8661", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00f6ren Auer, Volha Bryl, and Sebastian Tramp. 2014. Linked Open Data-Creating Knowledge Out of In- terlinked Data: Results of the LOD2 Project, volume 8661. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "VizWiz dataset browser: A tool for visualizing machine learning datasets", |
|
"authors": [ |
|
{ |
|
"first": "Nilavra", |
|
"middle": [], |
|
"last": "Bhattacharya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danna", |
|
"middle": [], |
|
"last": "Gurari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nilavra Bhattacharya and Danna Gurari. 2019. VizWiz dataset browser: A tool for visualizing machine learning datasets. arXiv.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Rasa: Open source language understanding and dialogue management", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Bocklisch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joey", |
|
"middle": [], |
|
"last": "Faulkner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Pawlowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Nichol", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1712.05181" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Bocklisch, Joey Faulkner, Nick Pawlowski, and Alan Nichol. 2017. Rasa: Open source language understanding and dialogue management. arXiv e- prints, page arXiv:1712.05181.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Visual challenges in the everyday lives of blind people", |
|
"authors": [ |
|
{ |
|
"first": "Erin", |
|
"middle": [], |
|
"last": "Brady", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meredith", |
|
"middle": [ |
|
"Ringel" |
|
], |
|
"last": "Morris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Bigham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, CHI '13", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2117--2126", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2470654.2481291" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erin Brady, Meredith Ringel Morris, Yu Zhong, Samuel White, and Jeffrey P. Bigham. 2013. Vi- sual challenges in the everyday lives of blind people. In Proceedings of the SIGCHI Conference on Hu- man Factors in Computing Systems, CHI '13, page 2117-2126, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Evaluating natural language understanding services for conversational question answering systems", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Braun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [ |
|
"Hernandez" |
|
], |
|
"last": "Mendez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Matthes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Langen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "174--185", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5522" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Braun, Adrian Hernandez Mendez, Florian Matthes, and Manfred Langen. 2017. Evaluating natural language understanding services for conver- sational question answering systems. In Proceed- ings of the 18th Annual SIGdial Meeting on Dis- course and Dialogue, page 174-185, Saarbr\u00fccken, Germany. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "DIET: Lightweight language understanding for dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Tanja", |
|
"middle": [], |
|
"last": "Bunk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daksh", |
|
"middle": [], |
|
"last": "Varshneya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vlasov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Nichol", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.09936" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tanja Bunk, Daksh Varshneya, Vladimir Vlasov, and Alan Nichol. 2020. DIET: Lightweight language understanding for dialogue systems. arXiv e-prints, page arXiv:2004.09936.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Improving mealtime experiences of people with visual impairments", |
|
"authors": [ |
|
{ |
|
"first": "Seunga", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soobin", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sohyeon", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyungyeon", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uran", |
|
"middle": [], |
|
"last": "Oh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 18th International Web for All Conference, W4A '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3430263.3452421" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "SeungA Chung, Soobin Park, Sohyeon Park, Kyungyeon Lee, and Uran Oh. 2021. Improv- ing mealtime experiences of people with visual impairments. In Proceedings of the 18th Interna- tional Web for All Conference, W4A '21, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The interplay between geometry and function in the comprehension of over, under, above, and below", |
|
"authors": [ |
|
{ |
|
"first": "Kenny", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Coventry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Merc\u00e8", |
|
"middle": [], |
|
"last": "Prat-Sala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lynn", |
|
"middle": [], |
|
"last": "Richards", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Journal of Memory and Language", |
|
"volume": "44", |
|
"issue": "3", |
|
"pages": "376--398", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1006/jmla.2000.2742" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenny R. Coventry, Merc\u00e8 Prat-Sala, and Lynn Richards. 2001. The interplay between geometry and function in the comprehension of over, under, above, and below. Journal of Memory and Lan- guage, 44(3):376-398.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Toby Perrett, Will Price, et al. 2020. The EPIC-KITCHENS dataset: Collection, challenges and baselines", |
|
"authors": [ |
|
{ |
|
"first": "Dima", |
|
"middle": [], |
|
"last": "Damen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazel", |
|
"middle": [], |
|
"last": "Doughty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [], |
|
"last": "Farinella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonino", |
|
"middle": [], |
|
"last": "Furnari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evangelos", |
|
"middle": [], |
|
"last": "Kazakos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Moltisanti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Munro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--1", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TPAMI.2020.2991965" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dima Damen, Hazel Doughty, Giovanni Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kaza- kos, Davide Moltisanti, Jonathan Munro, Toby Per- rett, Will Price, et al. 2020. The EPIC-KITCHENS dataset: Collection, challenges and baselines. IEEE Transactions on Pattern Analysis and Machine Intel- ligence, pages 1-1.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Embodied Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samyak", |
|
"middle": [], |
|
"last": "Datta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgia", |
|
"middle": [], |
|
"last": "Gkioxari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2135--213509", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPRW.2018.00279" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhishek Das, Samyak Datta, Georgia Gkioxari, Ste- fan Lee, Devi Parikh, and Dhruv Batra. 2018. Em- bodied Question Answering. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recog- nition Workshops (CVPRW), pages 2135-213509.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Vizwiz grand challenge workshop at cvpr 2020 -panel discussion with blind technology experts", |
|
"authors": [ |
|
{ |
|
"first": "Chancey", |
|
"middle": [], |
|
"last": "Fleet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Bennett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Potluri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chancey Fleet, Cynthia Bennett, and Venkatesh Potluri. 2020. Vizwiz grand challenge workshop at cvpr 2020 -panel discussion with blind technology ex- perts.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "IQA: Visual Question Answering in Interactive Environments", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gordon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Rastegari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Redmon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dieter", |
|
"middle": [], |
|
"last": "Fox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4089--4098", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2018.00430" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Gordon, Aniruddha Kembhavi, Mohammad Rastegari, Joseph Redmon, Dieter Fox, and Ali Farhadi. 2018. IQA: Visual Question Answering in Interactive Environments. In 2018 IEEE/CVF Con- ference on Computer Vision and Pattern Recogni- tion, pages 4089-4098.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "VizWiz grand challenge: Answering visual questions from blind people", |
|
"authors": [ |
|
{ |
|
"first": "Danna", |
|
"middle": [], |
|
"last": "Gurari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abigale", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Stangl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anhong", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristen", |
|
"middle": [], |
|
"last": "Grauman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiebo", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Bigham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danna Gurari, Qing Li, Abigale J. Stangl, Anhong Guo, Chi Lin, Kristen Grauman, Jiebo Luo, and Jef- frey P. Bigham. 2018. VizWiz grand challenge: An- swering visual questions from blind people. CoRR, abs/1802.08218.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "GQA: A new dataset for real-world visual reasoning and compositional question answering", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Drew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Hudson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6700--6709", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Drew A Hudson and Christopher D Manning. 2019. GQA: A new dataset for real-world visual reasoning and compositional question answering. In Proceed- ings of the IEEE/CVF Conference on Computer Vi- sion and Pattern Recognition (CVPR), pages 6700- 6709.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "An analysis of the impact of visual impairment on activities of daily living and visionrelated quality of life in a visually impaired adult population", |
|
"authors": [ |
|
{ |
|
"first": "Nabila", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannah", |
|
"middle": [ |
|
"Elizabeth" |
|
], |
|
"last": "Bartlett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Cooke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "British Journal of Visual Impairment", |
|
"volume": "37", |
|
"issue": "1", |
|
"pages": "50--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nabila Jones, Hannah Elizabeth Bartlett, and Richard Cooke. 2019. An analysis of the impact of visual impairment on activities of daily living and vision- related quality of life in a visually impaired adult population. British Journal of Visual Impairment, 37(1):50-63.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations", |
|
"authors": [ |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuke", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Groth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Hata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Kravitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Kalantidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li-Jia", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Shamma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Journal of Computer Vision", |
|
"volume": "123", |
|
"issue": "1", |
|
"pages": "32--73", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s11263-016-0981-7" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin John- son, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Li Fei-Fei. 2017. Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations. Interna- tional Journal of Computer Vision, 123(1):32-73.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Tell-and-answer: Towards explainable visual question answering using attributes and captions", |
|
"authors": [ |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianlong", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongfei", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Mei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiebo", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1338--1346", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1164" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qing Li, Jianlong Fu, Dongfei Yu, Tao Mei, and Jiebo Luo. 2018. Tell-and-answer: Towards explainable visual question answering using attributes and cap- tions. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1338-1346, Stroudsburg, PA, USA. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Kaiming He, and Piotr Doll\u00e1r. 2020. Focal Loss for Dense Object Detection", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Priya", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [], |
|
"last": "Girshick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaiming", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
|
"volume": "42", |
|
"issue": "2", |
|
"pages": "318--327", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TPAMI.2018.2858826" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Doll\u00e1r. 2020. Focal Loss for Dense Object Detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(2):318-327.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Conceptnet-a practical commonsense reasoning tool-kit", |
|
"authors": [ |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Push", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "BT technology journal", |
|
"volume": "22", |
|
"issue": "4", |
|
"pages": "211--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hugo Liu and Push Singh. 2004. Conceptnet-a practi- cal commonsense reasoning tool-kit. BT technology journal, 22(4):211-226.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Hierarchical question-image co-attention for visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Jiasen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianwei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.00061" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiasen Lu, Jianwei Yang, Dhruv Batra, and Devi Parikh. 2016. Hierarchical question-image co-attention for visual question answering. arXiv preprint arXiv:1606.00061.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Amit Parekh, Angus Addlesee, and Oliver Lemon. 2021. Am i allergic to this? assisting sight impaired people in the kitchen", |
|
"authors": [ |
|
{ |
|
"first": "Elisa", |
|
"middle": [ |
|
"Ramil" |
|
], |
|
"last": "Brick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vanesa", |
|
"middle": [ |
|
"Caballero" |
|
], |
|
"last": "Alonso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Conor", |
|
"middle": [], |
|
"last": "O'Brien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheron", |
|
"middle": [], |
|
"last": "Tong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emilie", |
|
"middle": [], |
|
"last": "Tavernier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Parekh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angus", |
|
"middle": [], |
|
"last": "Addlesee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Lemon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elisa Ramil Brick, Vanesa Caballero Alonso, Conor O'Brien, Sheron Tong, Emilie Tavernier, Amit Parekh, Angus Addlesee, and Oliver Lemon. 2021. Am i allergic to this? assisting sight impaired peo- ple in the kitchen.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Categorisation, typicality & object-specific features in spatial referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Richard-Bollans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luc\u00eda", |
|
"middle": [], |
|
"last": "G\u00f3mez\u00e1lvarez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third International Workshop on Spatial Language Understanding, SpLU", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "39--49", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.splu-1.5" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Richard-Bollans, Anthony Cohn, and Luc\u00eda G\u00f3mez\u00c1lvarez. 2020. Categorisation, typicality & object-specific features in spatial referring ex- pressions. In Proceedings of the Third Interna- tional Workshop on Spatial Language Understand- ing, SpLU, pages 39-49, Stroudsburg, PA, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Explainable AI: interpreting, explaining and visualizing deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Vedaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lars", |
|
"middle": [ |
|
"Kai" |
|
], |
|
"last": "Hansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "11700", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wojciech Samek, Gr\u00e9goire Montavon, Andrea Vedaldi, Lars Kai Hansen, and Klaus-Robert M\u00fcller. 2019. Explainable AI: interpreting, explaining and visual- izing deep learning, volume 11700. Springer Na- ture.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "CompGuess-What?!: A Multi-task Evaluation Framework for Grounded Language Learning", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Suglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Konstas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Vanzo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emanuele", |
|
"middle": [], |
|
"last": "Bastianelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stella", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Lemon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7625--7641", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.682" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Suglia, Ioannis Konstas, Andrea Vanzo, Emanuele Bastianelli, Desmond Elliott, Stella Frank, and Oliver Lemon. 2020. CompGuess- What?!: A Multi-task Evaluation Framework for Grounded Language Learning. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7625-7641, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Wikidata: a free collaborative knowledgebase", |
|
"authors": [ |
|
{ |
|
"first": "Denny", |
|
"middle": [], |
|
"last": "Vrande\u010di\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Kr\u00f6tzsch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Communications of the ACM", |
|
"volume": "57", |
|
"issue": "10", |
|
"pages": "78--85", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Denny Vrande\u010di\u0107 and Markus Kr\u00f6tzsch. 2014. Wiki- data: a free collaborative knowledgebase. Commu- nications of the ACM, 57(10):78-85.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A review of the research on dialogue management of task-oriented systems", |
|
"authors": [ |
|
{ |
|
"first": "Yin Jiang", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [ |
|
"Ling" |
|
], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "In Journal of Physics: Conference Series", |
|
"volume": "1267", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yin Jiang Zhao, Yan Ling Li, and Min Lin. 2019. A review of the research on dialogue management of task-oriented systems. In Journal of Physics: Con- ference Series, volume 1267, page 012025. IOP Pub- lishing.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Example image and question input to a Visual Question Answering (VQA) system, with generated response, fromAntol et al. (2015).", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "The microservices and data-flow in Aye-saac.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "Illustration of how the NLG formulates a response, using the 'confidence' intent as an example.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"text": "Intent confusion matrix comparing predicted intents against true labels.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"text": "Example Aye-saac interactions", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">Accuracy (%) Usability (%)</td></tr><tr><td>Aye-saac</td><td>77.8</td><td>66.7</td></tr><tr><td>Pythia</td><td>100.0</td><td>27.8</td></tr><tr><td>HieCoAtt</td><td>50</td><td>5.6</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Comparing accuracy and usability of Aye-saac spatial relationships versus E2E systems", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |